metadata (dict) | text (string, 60 to 3.49M chars)
---|---
{
"source": "jeongseunghyeon/py_certificates_tool",
"score": 2
}
|
#### File: src/views/main.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QDateTime, QTimer
# from openssl_lib import OpenSSLLib
from .set_csr import SetCSRView
class CSRData:
def __init__(self):
self.country_name = ''
self.state_name = ''
self.locality_name = ''
self.org_name = ''
self.org_unit_name = ''
self.common_name = ''
self.email = ''
class MainView(QMainWindow):
def __init__(self):
super().__init__()
# UI Component Init
self.pfx_path = QLineEdit()
self.crt_path = QLineEdit()
self.key_path = QLineEdit()
self.cert_contents = QTextEdit()
# Variable
self.csr_data = CSRData()
self.datetime = QDateTime.currentDateTime()
self.datetime_label = ''
self.init_ui()
def init_ui(self):
self.init_menu_bar()
self.init_widget()
# Status Bar #
self.set_current_time()
qtimer = QTimer(self)
qtimer.timeout.connect(self.set_current_time)
qtimer.start(1000)
# Window #
self.setWindowTitle('Certificates Tool(Developed by <EMAIL>)')
self.resize(700, 600)
self.move_to_center()
self.setWindowIcon(QIcon('./image/icon.png'))
self.show()
def set_current_time(self):
current_date = QDateTime.currentDateTime()
self.datetime_label = f"Date : {current_date.toString('yyyy-MM-dd HH:mm:ss')}"
self.statusBar().showMessage(self.datetime_label)
def init_menu_bar(self):
# Top Menu Init #
exit_action = QAction('Exit', self)
exit_action.setShortcut('Ctrl+Q')
exit_action.setStatusTip('Exit application')
exit_action.triggered.connect(qApp.quit)
menu_bar = self.menuBar()
menu_bar.setNativeMenuBar(False)
file_menu = menu_bar.addMenu('&File')
file_menu.addAction(exit_action)
return
def init_widget(self):
self.setCentralWidget(QWidget())
cw = self.centralWidget()
grid = QGridLayout()
cw.setLayout(grid)
grid.addWidget(self.create_csr_group_layout(), 0, 0, 1, 6)
grid.addWidget(QLabel('PFX file : '), 1, 0, 1, 1)
grid.addWidget(QLabel('Crt file : '), 2, 0, 1, 1)
grid.addWidget(QLabel('Key file : '), 3, 0, 1, 1)
grid.addWidget(QLabel('Content : '), 4, 0, 1, 1)
self.pfx_path.setReadOnly(True)
self.crt_path.setReadOnly(True)
self.key_path.setReadOnly(True)
self.cert_contents.setReadOnly(True)
grid.addWidget(self.pfx_path, 1, 1, 1, 4)
grid.addWidget(self.crt_path, 2, 1, 1, 4)
grid.addWidget(self.key_path, 3, 1, 1, 4)
grid.addWidget(self.cert_contents, 6, 1, 1, 4)
pfx_file_btn = QPushButton('File Select', self)
pfx_file_btn.clicked.connect(self.onclick_crt_file_open_btn)
crt_file_btn = QPushButton('File Select', self)
crt_file_btn.clicked.connect(self.onclick_crt_file_open_btn)
key_file_btn = QPushButton('File Select', self)
key_file_btn.clicked.connect(self.onclick_key_file_open_btn)
grid.addWidget(pfx_file_btn, 1, 5, 1, 1)
grid.addWidget(crt_file_btn, 2, 5, 1, 1)
grid.addWidget(key_file_btn, 3, 5, 1, 1)
return
def create_csr_group_layout(self):
groupbox = QGroupBox('CSR Setting')
hbox = QHBoxLayout()
set_csr_btn = QPushButton('Set CSR Attributes')
set_csr_btn.clicked.connect(self.onclick_set_csr_btn)
hbox.addWidget(set_csr_btn)
save_csr_btn = QPushButton('Save CSR')
hbox.addWidget(save_csr_btn)
groupbox.setLayout(hbox)
return groupbox
def move_to_center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def onclick_set_csr_btn(self):
set_csr_view = SetCSRView()
res = set_csr_view.show_modal()
if res:
self.csr_data.country_name = set_csr_view.country_name.text()
self.csr_data.state_name = set_csr_view.state_name.text()
self.csr_data.locality_name = set_csr_view.locality_name.text()
self.csr_data.org_name = set_csr_view.org_name.text()
self.csr_data.org_unit_name = set_csr_view.org_unit_name.text()
self.csr_data.common_name = set_csr_view.common_name.text()
self.csr_data.email = set_csr_view.email.text()
def onclick_crt_file_open_btn(self):
file_name = QFileDialog.getOpenFileName(self)
if file_name[0]:
self.crt_path.setText(file_name[0])
with open(file_name[0], 'r') as f:
data = f.read()
self.cert_contents.setText(data)
def onclick_key_file_open_btn(self):
file_name = QFileDialog.getOpenFileName(self)
if file_name[0]:
self.key_path.setText(file_name[0])
with open(file_name[0], 'r') as f:
data = f.read()
self.cert_contents.setText(data)
```
|
{
"source": "Jeongseup/DACON_BitcoinTrader",
"score": 3
}
|
#### File: DACON_BitcoinTrader/codes/preprocessor.py
```python
import os
import joblib
import numpy as np
import pandas as pd
from statsmodels.tsa.api import SimpleExpSmoothing
from sklearn.preprocessing import KBinsDiscretizer, MinMaxScaler
### ------------ Data preprocess part ------------ ###
def coin_index_export(input_array, coin_num):
''' Extract the sample indices that belong to the given coin number '''
index = []
sample_id_len = input_array.shape[0]
coin_num_col = 0
for sample_id in range(sample_id_len):
if input_array[sample_id, 0, coin_num_col] == coin_num:
#print(sample_id)
index.append(sample_id)
return index
def df2d_to_array3d(df_2d):
feature_size = df_2d.iloc[:,2:].shape[1]
time_size = len(df_2d.time.value_counts())
sample_size = len(df_2d.sample_id.value_counts())
array_3d = df_2d.iloc[:,2:].values.reshape([sample_size, time_size, feature_size])
return array_3d
def getWeights_FFD(d, size, thres):
''' Compute the weights for fixed-width-window fractional differencing (FFD) '''
w = [1.] # initial weight w_0 = 1
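# each subsequent weight follows the recursion w_k = -w_{k-1} * (d - k + 1) / k; generation stops once the new weight's magnitude drops below thres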
for k in range(1, size):
w_ = -w[-1] * (d - k + 1) / k # apply the recursive weight formula
if abs(w[-1]) >= thres and abs(w_) <= thres:
break
else:
w.append(w_)
# reverse w and reshape it into a column vector
w = np.array(w[::-1]).reshape(-1, 1)
return w
def fracDiff_FFD(series, d, thres=0.002):
'''
Fractional differencing with a fixed-width window (FFD)
Constant width window (new solution)
Note 1: thres determines the cut-off weight for the window
Note 2: d can be any positive fractional, not necessarily bounded [0,1]
'''
# 1) Compute weights for the longest series
w = getWeights_FFD(d, series.shape[0], thres)
width = len(w) - 1
# 2) Apply weights to values
df = []
seriesF = series
for iloc in range(len(w), seriesF.shape[0]):
k = np.dot(w.T[::-1], seriesF[iloc - len(w):iloc])
df.append(k)
df = np.array(df)
return df, w
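# Example usage (hypothetical values): difference one sample's open-price series
# diffed, weights = fracDiff_FFD(train_x_array[0, :, 1], d=0.2, thres=0.002)
# diffed holds one value per full window, i.e. len(series) - len(weights) rows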
def FFD_smoothing(train_x_array):
''' Extract the fractionally differenced (FFD) series from the training array '''
FFD_array = np.zeros((383, 1339, 1))
for x in range(383):
fdiff, w = fracDiff_FFD(train_x_array[x, :, 1], d=0.2, thres=0.002)
FFD_array[x]= fdiff
return FFD_array
def simple_exponetial_smoothing(arr, alpha=0.3):
y_series = list()
for temp_arr in arr:
target_series = temp_arr[:, 1].reshape(-1) # open col is 1 index
smoother = SimpleExpSmoothing(target_series, initialization_method="heuristic").fit(smoothing_level=0.3,optimized=False)
smoothing_series = smoother.fittedvalues
y_series.append(smoothing_series)
return np.array(y_series)
# ================================================= #
def simple_exponetial_smoothing_fory(arr, alpha=0.3):
y_series = list()
for temp_arr in arr:
target_series = temp_arr[:, 1].reshape(-1) # open col is 1 index
smoother = SimpleExpSmoothing(target_series, initialization_method="heuristic").fit(smoothing_level=alpha,optimized=False)
smoothing_series = smoother.fittedvalues
y_series.append(smoothing_series)
return np.array(y_series)
# ================================================= #
def simple_exponetial_smoothing_forX(arr, alpha=0.3):
# initialization
sample_size = int(arr.shape[0])
time_size = int(arr.shape[1])
feature_size = int(arr.shape[2])
# create empty array
smoothing_arr = np.zeros((sample_size, time_size, feature_size - 1))
for idx, temp_arr in enumerate(arr):
for col in range(1, feature_size): # open col is 1 index
if col < 5:
temp_series = temp_arr[:, col].reshape(-1)
smoother = SimpleExpSmoothing(temp_series, initialization_method="heuristic").fit(smoothing_level=0.3,optimized=False)
temp_smoothing_series = smoother.fittedvalues
smoothing_arr[idx, :, col-1] = temp_smoothing_series
else:
pass_series = temp_arr[:, col].reshape(-1)
smoothing_arr[idx, :, col-1] = pass_series
return smoothing_arr
# ================================================= #
def moving_average(arr, window_size = 20):
#length = moving-average window size
length = window_size
ma = np.zeros((arr.shape[0], arr.shape[1] - length, arr.shape[2]))
for idx in range(arr.shape[0]):
for i in range(length, arr.shape[1]):
for col in range(arr.shape[2]):
ma[idx, i-length, col] = arr[idx,i-length:i, col].mean() #open
return ma[:, :, 1] # open col is 1
def time_split(input_array, split_size = 6):
''' Split the data into n-minute candles '''
# origin size define
index_size = input_array.shape[0]
origin_time_size = input_array.shape[1]
variable_size = input_array.shape[2]
# new array size define
new_time_size = int(origin_time_size/split_size) # 1380 / 6
new_array = np.zeros((index_size, new_time_size, variable_size))
for idx in range(index_size):
for time_idx in range(new_time_size):
first_time_idx = time_idx * split_size
last_time_idx = ((time_idx+1) * split_size) -1
new_array[idx, time_idx, 0] = input_array[idx, first_time_idx, 0] #coin_num
new_array[idx, time_idx, 1] = input_array[idx, first_time_idx, 1] #open
new_array[idx, time_idx, 2] = np.max(input_array[idx, first_time_idx:last_time_idx, 2]) #high
new_array[idx, time_idx, 3] = np.min(input_array[idx, first_time_idx:last_time_idx, 3]) #low
new_array[idx, time_idx, 4] = input_array[idx, last_time_idx, 4] #close
new_array[idx, time_idx, 5] = np.sum(input_array[idx, first_time_idx:last_time_idx, 5]) #etc
new_array[idx, time_idx, 6] = np.sum(input_array[idx, first_time_idx:last_time_idx, 6]) #etc
new_array[idx, time_idx, 7] = np.sum(input_array[idx, first_time_idx:last_time_idx, 7]) #etc
new_array[idx, time_idx, 8] = np.sum(input_array[idx, first_time_idx:last_time_idx, 8]) #etc
new_array[idx, time_idx, 9] = np.sum(input_array[idx, first_time_idx:last_time_idx, 9]) #etc
return new_array
### ---------------------------------------------- ###
def train_val_test_spliter(arr):
n = len(arr)
num_features = arr.shape[2] - 1
train_arr = arr[0:int(n*0.8), :, :]
val_arr = arr[int(n*0.8):, :, :]
n2 = len(train_arr) + len(val_arr)
print(
f'''
======================================================
Origin length is {n}, then total split length is {n2}
======================================================
train length is {train_arr.shape},
val length is {val_arr.shape},
num_features is ({num_features})
'''
)
return train_arr, val_arr
# TODO: study this and clean up the preprocessing below
def each_coin_normalization(train_x_arr):
''' Normalize the data for each coin separately '''
# unique coin numbers
unique_coin_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
#create empty scaled list
scaled_train_x_arr = np.zeros((train_x_arr.shape[0], train_x_arr.shape[1], train_x_arr.shape[2]))
for temp_coin_num in unique_coin_index:
# for each unique coin number, extract the sample indices that belong to that coin
# ex) if temp_coin_num is 0, temp_coin_index = [3, 7, 8, 14...]
temp_coin_index = coin_index_export(train_x_arr, temp_coin_num)
# temp coin num array export
temp_x_arr = train_x_arr[temp_coin_index]
# initialization
num_sample = temp_x_arr.shape[0] # sample dim
num_sequence = temp_x_arr.shape[1] # time-sequence dim
num_feature = temp_x_arr.shape[2] # feature dim
# create empty scaler
temp_scaler = MinMaxScaler()
# fit the scaler incrementally while iterating over the time series
print('Current normalizing coin number is {}'.format(temp_coin_num))
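# partial_fit accumulates the min/max over every sample of this coin, so all of its samples share one scale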
for temp_sample, temp_index in enumerate(temp_coin_index):
temp_scaler.partial_fit(temp_x_arr[temp_sample, :, 5:]) # open =1, high = 2, low=3, close=4, volume=5 ~...
# scale (transform) each sample with the fitted scaler
for temp_sample, temp_index in enumerate(temp_coin_index):
scaled_train_x_arr[temp_index, :, 5:] = temp_scaler.transform(temp_x_arr[temp_sample, :, 5:]).reshape(1, num_sequence, 5)
scaled_train_x_arr[temp_index, :, :5] = temp_x_arr[temp_sample, :, :5]
# save scaler for test arr
dir_name = './scaler'
file_name = f'coin_{temp_coin_num}_scaler.pkl'
save_path = os.path.join(dir_name, file_name)
joblib.dump(temp_scaler, save_path)
print("Each coin normalization, Complete!")
return scaled_train_x_arr
def kbin_discretizer(input_array):
kb = KBinsDiscretizer(n_bins=10, strategy='uniform', encode='ordinal')
processed_data = np.zeros((input_array.shape[0], input_array.shape[1], 1))
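# the globals()['processing_array{}'] indirection below only creates throwaway per-sample variables;
# each iteration fits the 10-bin ordinal discretizer on one sample's column-1 (open) series and stores the encoded result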
for i in range(input_array.shape[0]):
# coin_index_export args : (input_array, coin_num)
globals()['processing_array{}'.format(i)] = input_array[i,:,1]
#globals()['outliery_array{}'.format(i)] = train_y_array[outlier[i],:,1]
kb.fit(globals()['processing_array{}'.format(i)].reshape(input_array.shape[1],1))
globals()['processed_fit{}'.format(i)] = kb.transform(globals()['processing_array{}'.format(i)].reshape(input_array.shape[1],1))
#globals()['outliery_fit{}'.format(i)] = kb.transform(globals()['outliery_array{}'.format(i)].reshape(120,1))
processed_data[i,:,:] = globals()['processed_fit{}'.format(i)]
return processed_data
def outlier_detecter(raw_y_arr, outlier_criteria = 0.05):
open_arr = raw_y_arr[:, :, 1] #open col is 1
outlier_list = []
openrange_list = []
for idx, temp_arr in enumerate(open_arr):
temp_min = temp_arr.min()
temp_max = temp_arr.max()
temp_arr_range = temp_max - temp_min
openrange_list.append(temp_arr_range)
if temp_arr_range > outlier_criteria:
outlier_list.append(idx)
print(f'open series {idx} is an outlier sample!')
print(f'temp array range is {temp_arr_range:.3}\n')
return outlier_list, np.array(openrange_list)
# ================================================= #
# ====== Generating Dataset ====== #
def data_generate(dataframe, x_frames, y_frames, print_mode = False):
''' (description omitted) '''
# grouping
grouped_df = dataframe.groupby('sample_id')
# export unique sample ids
unique_sample_id_list = grouped_df.sample_id.unique()
# create new lists
X, y = list(), list()
''' loop over each sample '''
for sample_id in unique_sample_id_list:
# get one sample_id in sample list
temp_sample_id = sample_id.item()
# get one group by temp_sample_id
temp_df = grouped_df.get_group(temp_sample_id)
# count how many array sets are produced per sample
count = 0
split_length = len(temp_df) - (x_frames + y_frames) + 1
''' split loop over the data within one sample '''
for time_idx in range(split_length):
# shift the index forward by x_frames
time_idx += x_frames
# temp_data select
temp_arr = temp_df.iloc[time_idx - x_frames : time_idx + y_frames, 3:].values
# get values
temp_x = temp_arr[:x_frames, :]
temp_y = temp_arr[x_frames:, :]
# # 2d to 3d -> (255, 12) to (1, 255, 12) / (120, 12) to (1, 120, 12)
# temp_3d_x = np.expand_dims(temp_2d_x, axis = 0)
# temp_3d_y = np.expand_dims(temp_2d_y, axis = 0)
# appending
X.append(temp_x)
y.append(temp_y)
# counter printing
count += 1
if (count == split_length) & (print_mode == True):
print(f'current sample id : {temp_sample_id}')
print(f'number of arrays generated for sample {temp_sample_id} : {count}')
return np.array(X), np.array(y)
# ====== Generating Dataset ====== #
def open_data_generate(dataframe, col_name, x_frames, y_frames, print_mode = False):
'''
Set print_mode = True to see how the data gets split
example)
x_frames = 60
train_x, train_y = open_data_generate(train_df,col_name ='open', x_frames = x_frames, y_frames = 1, print_mode = False)
shapes after preprocessing:
train x shape is (288000, 60)
train y shape is (288000, 1)
'''
# grouping
grouped_df = dataframe.groupby('sample_id')
# export unique sample ids
unique_sample_id_list = grouped_df.sample_id.unique()
# create new lists
X, y = list(), list()
''' loop over each sample '''
for sample_id in unique_sample_id_list:
# get one sample_id in sample list
temp_sample_id = sample_id.item()
# get one group by temp_sample_id
temp_series = grouped_df.get_group(temp_sample_id)[col_name]
# count how many array sets are produced per sample
count = 0
split_length = len(temp_series) - (x_frames + y_frames) + 1
''' split loop over the data within one sample '''
for time_idx in range(split_length):
# shift the index forward by x_frames
time_idx += x_frames
# temp_data select
temp_arr = temp_series[time_idx - x_frames : time_idx + y_frames]
# get values
temp_x, temp_y = temp_arr.iloc[:x_frames].values, temp_arr.iloc[x_frames:].values
# appending
X.append(temp_x)
y.append(temp_y)
# counter printing
count += 1
if (count == split_length) & (print_mode == True):
print(f'current sample id : {temp_sample_id}')
print(f'number of arrays generated for sample {temp_sample_id} : {count}')
return np.array(X), np.array(y)
def targetframes_predict(model, dataframe, test_id = 1207, x_frames = 60 , target_len = 120):
'''
test_id = 1207 in test_df corresponds to 7657 in the raw data
target_len is the length of the interval that must be predicted iteratively
example)
y_pred_arr, y_true_arr = targetframes_predict(model = model, dataframe = test_df, test_id = 1207, x_frames = x_frames , target_len = 120)
'''
# build the prediction list and take the trailing x_frames open values (up to t = 1380) as the seed input
y_pred_list = list()
y_true_list = dataframe[dataframe.sample_id == test_id].reset_index(drop=True).open[1380:].values
x_input = dataframe[dataframe.sample_id == test_id].reset_index(drop=True).open[1380 - x_frames :1380].values
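# rolling one-step-ahead forecast: each prediction is appended to the input window and the oldest value dropped, so later steps are predicted from earlier predictions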
for i in range(target_len):
yhat = model.predict(x_input.reshape((1, x_frames, 1)), verbose=0)
# list append
y_pred = round(yhat.item(), 8)
y_pred_list.append(y_pred)
# update the input window: append the prediction and drop the oldest value
x_input = np.append(x_input, y_pred)
x_input = np.delete(x_input, 0)
return np.array(y_pred_list), np.array(y_true_list)
def coindata_merger(train_x_df, train_y_df, coin_num):
# extract only the specified coin's rows from the dataframes
coin_num_x_df = train_x_df[train_x_df['coin_index'] == coin_num]
coin_num_y_df = train_y_df[train_y_df['coin_index'] == coin_num]
# shift the y dataframe's time values by adding 1380
coin_num_y_df.time = coin_num_y_df.time.copy() + 1380
# merge the x and y dataframes and sort by sample_id and time
merged_df = pd.concat([coin_num_x_df, coin_num_y_df])
merged_df = merged_df.sort_values(by = ['sample_id','time']).reset_index(drop=True)
# sample_id series orderly indexing
sample_id_series = merged_df.sample_id.value_counts().reset_index().rename(columns = {"index" : "sample_id"})
reset_index_series = sample_id_series.iloc[:,:1].sort_values(by = ['sample_id']).reset_index(drop=True)
# coin index file export
coin_index = reset_index_series.reset_index().set_index('sample_id')
coin_index_name = f'./coin_{coin_num}_index.json'
coin_index.to_json(coin_index_name, orient="table", indent=4)
# dict index
new_sample_dict = reset_index_series.reset_index().set_index('sample_id').to_dict()
# sample_id value initialization
merged_df['sample_id'] = merged_df['sample_id'].map(new_sample_dict['index'])
merged_df.to_hdf('./data/merged_data.h5', key = 'merged_df')
return merged_df
```
|
{
"source": "jeongsong97/PsychoPy",
"score": 3
}
|
#### File: jeongsong97/PsychoPy/Template.py
```python
from psychopy import core, visual, event
from psychopy.hardware import keyboard
import pandas as pd
import csv
import psychopy.clock
import psychopy.event
import random
import os
win = visual.Window([800,600], fullscr=False, monitor="testMonitor")
myMouse = event.Mouse(visible=False)
finalTable=[['image','Species', 'RespTime','Answer', 'Correct']]
message1= visual.TextStim(win, pos=[0,+0.1],text='Enter the participant number:')
message1.draw()
win.flip()
answer = ''
def show_cross(time):
cross = visual.TextStim(win, text='+')
cross.draw()
win.flip()
core.wait(time)
def block():
grating = psychopy.visual.TextStim(win=win, text ='T')
# Draw the stimulus to the window. We always draw at the back buffer of the window.
grating.draw()
# Flip back buffer and front buffer of the window.
win.flip()
# Pause 0.5 s, so you get a chance to see it!
core.wait(0.5)
show_cross(0.5)
choice = random.sample([0, 1], k=2) # random.shuffle() returns None, so sample a shuffled copy instead
stimClock = core.Clock()
'''
key = psychopy.event.getKeys(keyList =['2','3'], timeStamped = stimClock)
if len(key)>0:
ans=key[len(key) -1]
Resp_Time=ans[1]
x = ans[0]
if x=='2':
Answer="1"
if random_list [i] == 1:
Right = "Correct"
if random_list [i] == 0:
Right = "Incorrect"
else:
Answer = '0'
Resp_Time = 0
if random_list [i] == 1:
Right = "Incorrect"
if random_list [i] == 0:
Right = "Correct"
row=[question, species, Resp_Time, Answer,Right]
finalTable.append(row)
'''
cross = visual.TextStim(win, text='+')
# Draw the stimulus to the window. We always draw at the back buffer of the window.
cross.draw()
# Flip back buffer and front buffer of the window.
win.flip()
core.wait(0.75)
thisResp=None
while thisResp==None:
allKeys=event.waitKeys()
for thisKey in allKeys:
if thisKey=='space':
thisResp=1
elif thisKey=='return':
thisResp=1
elif thisKey=='backspace':
answer=answer[:-1]
message1.draw()
message2 = visual.TextStim(win, pos=[0,-0.1],text=answer)
message2.draw()
win.flip()
else:
answer += thisKey
message1.draw()
message2 = visual.TextStim(win, pos=[0,-0.1],text=answer)
message2.draw()
win.flip()
for i in range(8):
block()
# filename = 'images_'+answer+'.csv'
"""
with open(filename, 'a+', newline='') as file:
writer = csv.writer(file)
writer.writerows(finalTable)
"""
core.quit()
```
|
{
"source": "JeongsooHa/char-rnn-tensorflow",
"score": 2
}
|
#### File: JeongsooHa/char-rnn-tensorflow/train.py
```python
import tensorflow as tf
import numpy as np
from utils import TextLoader
# Set the hyperparameters needed for training.
data_dir = 'data/tinyshakespeare' # train on the Shakespeare play <Richard III> data
#data_dir = 'data/linux' # train on the <Linux source code> data
batch_size = 50 # Training : 50, Sampling : 1
seq_length = 50 # Training : 50, Sampling : 1
hidden_size = 128 # number of nodes in the hidden layer
learning_rate = 0.002
num_epochs = 2
num_hidden_layers = 2
grad_clip = 5 # threshold used for gradient clipping
# Load the data using TextLoader.
data_loader = TextLoader(data_dir, batch_size, seq_length)
# Declare chars, which lists every character in the training data, and vocab, a dict that maps each character to an id.
chars = data_loader.chars
vocab = data_loader.vocab
vocab_size = data_loader.vocab_size # total number of characters in the vocabulary
# Define placeholders for the input data, the target data, and the batch size.
input_data = tf.placeholder(tf.int32, shape=[None, None]) # input_data : [batch_size, seq_length])
target_data = tf.placeholder(tf.int32, shape=[None, None]) # target_data : [batch_size, seq_length])
state_batch_size = tf.placeholder(tf.int32, shape=[]) # Training : 50, Sampling : 1
# Declare the variables that project the RNN's last hidden layer output to softmax logits.
# hidden_size -> vocab_size
softmax_w = tf.Variable(tf.random_normal(shape=[hidden_size, vocab_size]), dtype=tf.float32)
softmax_b = tf.Variable(tf.random_normal(shape=[vocab_size]), dtype=tf.float32)
# Declare num_hidden_layers LSTM cells (hidden layers).
cells = []
for _ in range(0, num_hidden_layers):
cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
cells.append(cell)
# Combine the cells into a multi-layer RNN.
cell = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
# Declare the embedding matrix used to transform the input data.
# vocab_size -> hidden_size
embedding = tf.Variable(tf.random_normal(shape=[vocab_size, hidden_size]), dtype=tf.float32)
inputs = tf.nn.embedding_lookup(embedding, input_data)
# Initialize the initial state to zeros.
initial_state = cell.zero_state(state_batch_size, tf.float32)
# Declare tf.nn.dynamic_rnn for training.
# outputs : [batch_size, seq_length, hidden_size]
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state, dtype=tf.float32)
# Reshape outputs to [batch_size * seq_length, hidden_size].
output = tf.reshape(outputs, [-1, hidden_size])
# Compute the final output logits.
# logits : [batch_size * seq_length, vocab_size]
logits = tf.matmul(output, softmax_w) + softmax_b
probs = tf.nn.softmax(logits)
# Define the cross-entropy loss function.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=target_data))
# Declare the optimizer and apply gradient clipping to it.
# Gradients larger than grad_clip (=5) are clipped to 5.
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_step = optimizer.apply_gradients(zip(grads, tvars))
# Open a session and run the training.
with tf.Session() as sess:
# Initialize the variables.
sess.run(tf.global_variables_initializer())
for e in range(num_epochs):
data_loader.reset_batch_pointer()
# Set the initial state value.
state = sess.run(initial_state, feed_dict={state_batch_size : batch_size})
for b in range(data_loader.num_batches):
# Load the x, y batch.
x, y = data_loader.next_batch()
# Apply one-hot encoding to y.
y = tf.one_hot(y, vocab_size) # y : [batch_size, seq_length, vocab_size]
y = tf.reshape(y, [-1, vocab_size]) # y : [batch_size * seq_length, vocab_size]
y = y.eval()
# Build the feed_dict, including the initial LSTM cell states (feed_dict[c]) and hidden-layer outputs (feed_dict[h]).
feed_dict = {input_data : x, target_data: y, state_batch_size : batch_size}
for i, (c, h) in enumerate(initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
# Run one training step.
_, loss_print, state = sess.run([train_step, loss, final_state], feed_dict=feed_dict)
print("{}(ํ์ตํ ๋ฐฐ์น๊ฐ์)/{}(ํ์ตํ ๋ฐฐ์น๊ฐ์), ๋ฐ๋ณต(epoch): {}, ์์คํจ์(loss): {:.3f}".format(
e * data_loader.num_batches + b,
num_epochs * data_loader.num_batches,
(e+1),
loss_print))
print("ํธ๋ ์ด๋์ด ๋๋ฌ์ต๋๋ค!")
# ์ํ๋ง ์์
print("์ํ๋ง์ ์์ํฉ๋๋ค!")
num_sampling = 4000 # ์์ฑํ ๊ธ์(Character)์ ๊ฐ์๋ฅผ ์ง์ ํฉ๋๋ค.
prime = u' ' # ์์ ๊ธ์๋ฅผ ' '(๊ณต๋ฐฑ)์ผ๋ก ์ง์ ํฉ๋๋ค.
sampling_type = 1 # set the sampling type
state = sess.run(cell.zero_state(1, tf.float32)) # initialize the RNN's first state to zeros
# Define the weighted_pick function for random sampling.
def weighted_pick(weights):
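# np.cumsum turns the weights into a cumulative distribution; np.searchsorted maps a uniform draw in [0, s) to the index whose cumulative weight it falls under, i.e. it samples an index with probability proportional to its weight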
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
ret = prime # ret accumulates the sampled text; start it with the prime character
char = prime[-1] # the first input character for the Char-RNN
for n in range(num_sampling):
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
# Run the RNN for one step and get the softmax matrix back.
feed_dict = {input_data: x, state_batch_size : 1, initial_state: state}
[probs_result, state] = sess.run([probs, final_state], feed_dict=feed_dict)
# Remove the unnecessary dimension.
# probs_result : (1,65) -> p : (65)
p = np.squeeze(probs_result)
# Sample in one of three ways depending on sampling_type.
# sampling_type : 0 -> always use argmax to predict the next character
# sampling_type : 1 (default) -> always use random sampling to predict the next character
# sampling_type : 2 -> use random sampling if the previous character is ' ' (a space), otherwise use argmax
if sampling_type == 0:
sample = np.argmax(p)
elif sampling_type == 2:
if char == ' ':
sample = weighted_pick(p)
else:
sample = np.argmax(p)
else:
sample = weighted_pick(p)
pred = chars[sample]
ret += pred # append the character predicted at this step to the sampled text (e.g. if pred = 'L', ret goes from 'HEL' to 'HELL')
char = pred # use the predicted character as the next input to the RNN
print("์ํ๋ง ๊ฒฐ๊ณผ:")
print(ret)
```
|
{
"source": "JeongsooHa/handRNN",
"score": 3
}
|
#### File: JeongsooHa/handRNN/make.py
```python
import pickle
import sys
import tensorflow as tf
from tqdm import tqdm
def get_labels():
"""Return a list of our trained labels so we can
test our training accuracy. The file is in the
format of one label per line, in the same order
as the predictions are made. The order can change
between training runs."""
with open("./inception/retrained_labels.txt", 'r') as fin:
labels = [line.rstrip('\n') for line in fin]
return labels
def predict_on_frames(frames, batch):
"""Given a list of frames, predict all their classes."""
# Unpersists graph from file
with tf.gfile.FastGFile("./inception/retrained_graph.pb", 'rb') as fin:
graph_def = tf.GraphDef()
graph_def.ParseFromString(fin.read())
_ = tf.import_graph_def(graph_def, name='')
with tf.Session() as sess:
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
frame_predictions = []
image_path = './data/images/' + batch + '/'
pbar = tqdm(total=len(frames))
for i, frame in enumerate(frames):
filename = frame[0]
label = frame[1]
# Get the image path.
image = image_path + filename + '.jpg'
# Read in the image_data
image_data = tf.gfile.FastGFile(image, 'rb').read()
try:
predictions = sess.run(
softmax_tensor,
{'DecodeJpeg/contents:0': image_data}
)
prediction = predictions[0]
except KeyboardInterrupt:
print("You quit with ctrl+c")
sys.exit()
except:
print("Error making prediction, continuing.")
continue
# Save the probability that it's each of our classes.
frame_predictions.append([prediction, label])
if i > 0 and i % 10 == 0:
pbar.update(10)
pbar.close()
return frame_predictions
def get_accuracy(predictions, labels):
"""After predicting on each batch, check that batch's
accuracy to make sure things are good to go. This is
a simple accuracy metric, and so doesn't take confidence
into account, which would be a better metric to use to
compare changes in the model."""
correct = 0
for frame in predictions:
# Get the highest confidence class.
this_prediction = frame[0].tolist()
this_label = frame[1]
max_value = max(this_prediction)
max_index = this_prediction.index(max_value)
predicted_label = labels[max_index]
# Now see if it matches.
if predicted_label == this_label:
correct += 1
accuracy = correct / len(predictions)
return accuracy
def main():
batches = ['1']
labels = get_labels()
for batch in batches:
print("Doing batch %s" % batch)
with open('data/labeled-frames-' + batch + '.pkl', 'rb') as fin:
frames = pickle.load(fin)
# Predict on this batch and get the accuracy.
predictions = predict_on_frames(frames, batch)
accuracy = get_accuracy(predictions, labels)
print("Batch accuracy: %.5f" % accuracy)
# Save it.
with open('data/predicted-frames-' + batch + '.pkl', 'wb') as fout:
pickle.dump(predictions, fout)
print("Done.")
if __name__ == '__main__':
main()
```
|
{
"source": "JeongsooHa/ray",
"score": 3
}
|
#### File: serve/examples/echo_actor_batch.py
```python
import time
import requests
import ray
from ray import serve
from ray.serve.utils import pformat_color_json
from ray.serve import BackendConfig
class MagicCounter:
def __init__(self, increment):
self.increment = increment
@serve.accept_batch
def __call__(self, flask_request_list, base_number=None):
# batch_size = serve.context.batch_size
if serve.context.web:
result = []
for flask_request in flask_request_list:
base_number = int(flask_request.args.get("base_number", "0"))
result.append(base_number)
return list(map(lambda x: x + self.increment, result))
else:
result = []
for b in base_number:
ans = b + self.increment
result.append(ans)
return result
serve.init(blocking=True)
serve.create_endpoint("magic_counter", "/counter")
b_config = BackendConfig(max_batch_size=5)
serve.create_backend(
MagicCounter, "counter:v1", 42, backend_config=b_config) # increment=42
serve.set_traffic("magic_counter", {"counter:v1": 1.0})
print("Sending ten queries via HTTP")
for i in range(10):
url = "http://1192.168.127.12:8000/counter?base_number={}".format(i)
print("> Pinging {}".format(url))
resp = requests.get(url).json()
print(pformat_color_json(resp))
time.sleep(0.2)
print("Sending ten queries via Python")
handle = serve.get_handle("magic_counter")
for i in range(10):
print("> Pinging handle.remote(base_number={})".format(i))
result = ray.get(handle.remote(base_number=i))
print("< Result {}".format(result))
```
|
{
"source": "jeongsoopark/Falcor",
"score": 2
}
|
#### File: Mogwai/Testing/testSSAO.py
```python
def render_graph_testSSAO():
testSSAO = RenderGraph("ForwardRenderer")
DepthPass = RenderPass("DepthPass", {'depthFormat': ResourceFormat.D32Float})
testSSAO.addPass(DepthPass, "DepthPass")
SkyBox = RenderPass("SkyBox")
testSSAO.addPass(SkyBox, "SkyBox")
ForwardLightingPass = RenderPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False})
testSSAO.addPass(ForwardLightingPass, "ForwardLightingPass")
SSAOPass = RenderPass("SSAOPass")
testSSAO.addPass(SSAOPass, "SSAO")
testSSAO.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
testSSAO.addEdge("DepthPass.depth", "SkyBox.depth")
testSSAO.addEdge("SkyBox.target", "ForwardLightingPass.color")
testSSAO.addEdge("DepthPass.depth", "SSAO.depth")
testSSAO.addEdge("ForwardLightingPass.color", "SSAO.colorIn")
testSSAO.markOutput("SSAO.colorOut")
return testSSAO
test_SSAO = render_graph_testSSAO()
try: m.addGraph(test_SSAO)
except NameError: None
```
#### File: Tests/Old/StartBuildTest.py
```python
import urllib.parse
import urllib.request
import urllib.error
import getpass
import argparse
import xml.etree.ElementTree as ET
import TeamCityCommon
from TeamCityCommon import connect
from TeamCityCommon import server_url
from TeamCityCommon import project_url
from TeamCityCommon import connect
import base64
import os
import GetBuildStatus
default_xml_file = './build.xml'
queue_url = 'app/rest/buildQueue?locator=project:Falcor'
def start_build_internal(xml_data):
TeamCityCommon.post_request(queue_url, xml_data, 'application/xml')
def start_build(username, xml_file_path, branch_name, git_path, tests_directory, buildTypeId):
file = open(xml_file_path, 'rt')
data = file.read()
file.close()
# insert branch name into
xml = ET.fromstring(data)
print('Starting remote build with id: ' + buildTypeId)
# insert branch name into correct location
for data in xml.iter():
if data.get('branchName'):
data.set('branchName', branch_name)
if buildTypeId:
if data.get('id'):
data.set('id', buildTypeId)
for param in data.findall('property'):
if ( param.get('name') == 'branchname'):
param.set('value', branch_name)
if param.get('name') == 'tests_directory':
if tests_directory:
param.set('value', '--tests_directory ' + tests_directory )
if (param.get('name') == 'vcsRoot'):
# get vsc root list from teamcity
# find id from config
please = GetBuildStatus.get_vcs_instances()
string = str(please.read().decode())
vcs_xml = ET.fromstring(string)
vcs_id = ''
for node in vcs_xml.iter():
for instance in node.findall('vcs-root-instance'):
if instance.get('name').startswith(git_path):
vcs_id = instance.get('vcs-root-id')
set = True
break;
param.set('value', vcs_id)
# convert back to string to be sent in post request
xml_data = ET.tostring(xml)
start_build_internal(xml_data)
def main():
# Argument Parser.
parser = argparse.ArgumentParser()
# Adds argument for username to connect as
parser.add_argument('-u', '--username', action='store', help='UserName To Connect.');
# Adds argument for specifying wich xml to use for the build settings
parser.add_argument('-xml', '--xml_filename', action='store', help='XML file to send in POST request for build.')
# Parse the Arguments.
args = parser.parse_args()
if args.username:
username = args.username
else:
username = input('Enter username for teamcity.nvidia.com: ')
connect(username)
start_build(username, xml_file = default_xml_file)
if __name__ == '__main__':
main()
```
|
{
"source": "jeongukjae/bert-optimization",
"score": 2
}
|
#### File: bert-optimization/bert_optimization/metrics.py
```python
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import generic_utils, metrics_utils
class F1Score(tf.keras.metrics.Metric):
def __init__(self, thresholds=None, top_k=None, class_id=None, name=None, dtype=None):
super().__init__(name=name, dtype=dtype)
self.top_k = top_k
self.class_id = class_id
self.init_thresholds = thresholds # keep the raw constructor value so get_config() can return it
self.thresholds = metrics_utils.parse_init_thresholds(thresholds, default_threshold=0.5)
self.true_positives = self.add_weight("true_positives", shape=(len(self.thresholds),), initializer="zeros")
self.false_positives = self.add_weight("false_positives", shape=(len(self.thresholds),), initializer="zeros")
self.false_negatives = self.add_weight("false_negatives", shape=(len(self.thresholds),), initializer="zeros")
def update_state(self, y_true, y_pred, sample_weight=None):
metrics_utils.update_confusion_matrix_variables(
{
metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives,
metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives,
metrics_utils.ConfusionMatrix.FALSE_NEGATIVES: self.false_negatives,
},
y_true,
y_pred,
thresholds=self.thresholds,
top_k=self.top_k,
class_id=self.class_id,
sample_weight=sample_weight,
)
def result(self):
precision = tf.math.divide_no_nan(self.true_positives, self.true_positives + self.false_positives)
recall = tf.math.divide_no_nan(self.true_positives, self.true_positives + self.false_negatives)
result = tf.math.divide_no_nan(2 * (precision * recall), (precision + recall))
return result[0] if len(self.thresholds) == 1 else result
def reset_states(self):
num_thresholds = len(generic_utils.to_list(self.thresholds))
K.batch_set_value([(v, np.zeros((num_thresholds,))) for v in self.variables])
def get_config(self):
config = {"thresholds": self.init_thresholds, "top_k": self.top_k, "class_id": self.class_id}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
```
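A minimal usage sketch of the F1Score metric above, with made-up toy values; at the default 0.5 threshold the thresholded predictions give precision 1.0 and recall 2/3, so the result should be 0.8:
```python
import tensorflow as tf

from bert_optimization.metrics import F1Score

f1 = F1Score()
f1.update_state(tf.constant([1.0, 0.0, 1.0, 1.0]), tf.constant([0.9, 0.2, 0.4, 0.8]))
print(float(f1.result()))  # 2 * precision * recall / (precision + recall) -> 0.8
f1.reset_states()  # clears the accumulated confusion-matrix counts
```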
#### File: bert_optimization/models/bert.py
```python
import json
import tensorflow as tf
from . import models_utils
from .layer_normalization import LayerNormalization
from .transformer import TransformerEncoder
class BertConfig:
"""
Configuration of BertModel
vocab_size: Size of Vocab
hidden_size: Hidden size used in embedding, MHA, pooling layers
num_hidden_layers: # of transformer encoder layer
num_attention_heads: # of attention heads in MHA
intermediate_size: Intermediate size used in MHA
hidden_act: Activation function used in transformer encoder layer
hidden_dropout_prob: Dropout prob
attention_probs_dropout_prob: Attention Dropout prob
max_position_embeddings: Max Position Embeddings
type_vocab_size: Vocab Type (2 => Sentence A/B)
output_hidden_states: A flag for BertModel to return hidden_states
output_embedding: A flag for BertModel to return embedding
"""
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: str = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
output_hidden_states: bool = True,
output_embedding: bool = True,
use_splitted: bool = False,
aware_quantization: bool = False,
**kwargs, # unused
):
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.type_vocab_size = type_vocab_size
self.vocab_size = vocab_size
self.output_hidden_states = output_hidden_states
self.output_embedding = output_embedding
self.use_splitted = use_splitted
self.aware_quantization = aware_quantization
@staticmethod
def from_json(path: str, **kwargs) -> "BertConfig":
with open(path, "r") as f:
file_content = json.load(f)
return BertConfig(**file_content, **kwargs)
class BertModel(tf.keras.layers.Layer):
"""
Base Bert Model: https://arxiv.org/abs/1810.04805
Input Shape:
input_ids: (Batch Size, Sequence Length)
token_type_ids:: (Batch Size, Sequence Length)
attention_mask:: (Batch Size, Sequence Length)
head_mask: (Batch Size, Num Layers, Num Heads) -> https://arxiv.org/abs/1905.10650
Output Shape:
sequence_output: (Batch Size, Sequence Length, Hidden Size)
pooled_output: (Batch Size, Hidden Size)
embeddings: (Batch Size, Sequence Length, Hidden Size)
hidden_states: (Num Layers, Batch Size, Sequence Length, Hidden Size)
hidden_states is a "num layers"-length list of tensor that has shape of (Batch Size, Sequence Length, Hidden Size)
"""
def __init__(self, config: BertConfig):
super(BertModel, self).__init__()
embedding_component = models_utils.get_embedding(config.aware_quantization)
self.token_embeddings = embedding_component(config.vocab_size, config.hidden_size)
self.token_type_embeddings = embedding_component(config.type_vocab_size, config.hidden_size)
self.position_embeddings = embedding_component(config.max_position_embeddings, config.hidden_size)
self.embedding_layer_norm = LayerNormalization()
self.embedding_dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.encoders = [
TransformerEncoder(
config.num_attention_heads,
config.hidden_size,
config.intermediate_size,
config.attention_probs_dropout_prob,
config.hidden_act,
config.aware_quantization,
config.use_splitted,
)
for _ in range(config.num_hidden_layers)
]
self.pooler_layer = tf.keras.layers.Dense(config.hidden_size)
self.output_hidden_states = config.output_hidden_states
self.output_embedding = config.output_embedding
self.num_layers = config.num_hidden_layers
def call(self, input_tensors, head_mask=None):
assert len(input_tensors) == 3
input_ids, token_type_ids, attention_mask = input_tensors
seq_length = tf.shape(input_ids)[1]
position_ids = tf.range(tf.constant(0), seq_length, tf.constant(1), dtype=tf.dtypes.int32)
words_embeddings = self.token_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
embeddings = self.embedding_layer_norm(embeddings)
embeddings = self.embedding_dropout(embeddings)
hidden_state = embeddings
hidden_states = tf.TensorArray(tf.float32, size=self.num_layers)
for index in range(self.num_layers):
head_mask_by_encoder = None if head_mask is None else head_mask[:, index, :]
hidden_state = self.encoders[index](hidden_state, mask=attention_mask, head_mask=head_mask_by_encoder)
if self.output_hidden_states:
hidden_states.write(index, hidden_state)
sequence_output = hidden_state
pooled_output = tf.nn.tanh(self.pooler_layer(sequence_output[:, 0]))
outputs = (sequence_output, pooled_output)
if self.output_embedding:
outputs += (embeddings,)
if self.output_hidden_states:
outputs += (hidden_states.stack(),)
return outputs
```
#### File: bert_optimization/models/models_utils.py
```python
import tensorflow as tf
from .quantized_model import QuantizedDense, QuantizedEmbedding
def get_dense(aware_quantization: bool):
if aware_quantization:
return QuantizedDense
return tf.keras.layers.Dense
def get_embedding(aware_quantization: bool):
if aware_quantization:
return QuantizedEmbedding
return tf.keras.layers.Embedding
```
#### File: jeongukjae/bert-optimization/quantization.py
```python
import pathlib
import tensorflow as tf
from bert_optimization.models.heads import BertForClassificationToQuant
from bert_optimization.models import BertConfig
@tf.function
def build_bert_model_graph(bert_model: BertForClassificationToQuant, bert_config: BertConfig):
token_ids = tf.keras.Input((48,), dtype=tf.int32)
token_type_ids = tf.keras.Input((48,), dtype=tf.int32)
attention_mask = tf.keras.Input((48,), dtype=tf.float32)
bert_model([token_ids, token_type_ids, attention_mask])
bert_config = BertConfig.from_json("./tmp/bert_config.json")
model = tf.keras.models.Sequential([BertForClassificationToQuant(bert_config, 2)])
build_bert_model_graph(model, bert_config)
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model_quant = converter.convert()
tflite_models_dir = pathlib.Path("./tmp/")
tflite_models_dir.mkdir(exist_ok=True, parents=True)
tflite_model_file = tflite_models_dir / "bert_model.tflite"
print(tflite_model_file.write_bytes(tflite_model_quant))
print(tflite_model_file)
```
#### File: tests/models/test_bert.py
```python
import pytest
import tensorflow as tf
from bert_optimization.models.bert import BertConfig, BertModel
@pytest.fixture
def bert_config():
# use smaller dimension for faster unit tests
return BertConfig(100, intermediate_size=128)
@pytest.mark.parametrize("batch_size, seq_len", [pytest.param(1, 3), pytest.param(3, 12)])
def test_shapes_of_bert_model_outputs(bert_config: BertConfig, batch_size: int, seq_len: int):
"""Check shapes of BERT model outputs"""
# force to set output embedding and hidden states to True
bert_config.output_embedding = True
bert_config.output_hidden_states = True
bert = BertModel(bert_config)
input_ids = tf.random.uniform((batch_size, seq_len), maxval=bert_config.vocab_size)
token_type_ids = tf.random.uniform((batch_size, seq_len), maxval=bert_config.type_vocab_size)
attention_mask = tf.cast(tf.random.uniform((batch_size, seq_len), maxval=2), tf.dtypes.float32)
outputs = bert([input_ids, token_type_ids, attention_mask])
assert len(outputs) == 4
assert outputs[0].shape == (batch_size, seq_len, bert_config.hidden_size) # sequence output
assert outputs[1].shape == (batch_size, bert_config.hidden_size) # pooled output
assert outputs[2].shape == (batch_size, seq_len, bert_config.hidden_size) # embeddings
assert all(hidn.shape == (batch_size, seq_len, bert_config.hidden_size) for hidn in outputs[3]) # hidden states
```
#### File: bert-optimization/tests/test_metrics.py
```python
import tensorflow as tf
from bert_optimization.metrics import F1Score
def test_f1_correctness():
f1_score = F1Score()
# zero division case
f1_score.update_state(tf.constant([0, 0, 0, 0, 0, 0]), tf.constant([0, 0, 0, 0, 0, 0]))
assert f1_score.result().shape == tuple()
assert f1_score.result() == 0.0
f1_score.reset_states()
f1_score.update_state(tf.constant([1, 0, 0, 1, 0, 0]), tf.constant([0, 1, 0, 1, 1, 0]))
assert f1_score.result().shape == tuple()
assert f1_score.result() == 0.4
f1_score.update_state(tf.constant([0, 0, 0, 0, 0, 0]), tf.constant([0, 0, 0, 0, 0, 0]))
assert f1_score.result().shape == tuple()
assert f1_score.result() != 0.0
```
#### File: bert-optimization/tools/masking_each_heads.py
```python
import logging
import os
import sys
import tensorflow as tf
from bert_optimization import glue_processor, models, tokenizer, utils
from bert_optimization.glue_processor import convert_sentence_pair, convert_single_sentence
PROCESSOR_BY_TASK = {
"cola": glue_processor.CoLAProcessor,
"mrpc": glue_processor.MRPCProcessor,
"mnli": glue_processor.MNLIProcessor,
"sst-2": glue_processor.SST2Processor,
"rte": glue_processor.RTEProcessor,
"qqp": glue_processor.QQPProcessor,
}
def get_total_batches(dataset_size, batch_size):
return dataset_size // batch_size + bool(dataset_size % batch_size)
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(asctime)s: [%(levelname)s] %(message)s"))
logger.addHandler(handler)
parser = utils.get_default_bert_argument_parser()
args = parser.parse_args()
logger.info("Inference Parameters")
for key, val in vars(args).items():
logger.info(f" - {key}: {val}")
assert args.task.lower() in PROCESSOR_BY_TASK, f"Supported Tasks: {', '.join(PROCESSOR_BY_TASK.keys())}"
assert os.path.exists(args.output), f"Output path {args.output} does not exists"
assert os.path.exists(args.model + ".index"), f"Model path {args.model} does not exists"
assert os.path.exists(args.config), f"Config path {args.config} does not exists"
assert os.path.exists(args.dataset), f"Dataset path {args.dataset} does not exists"
assert os.path.exists(args.vocab), f"Vocab path {args.vocab} does not exists"
vocab = tokenizer.Vocab(args.vocab)
tokenizer = tokenizer.SubWordTokenizer(vocab, args.do_lower_case)
logger.info("Processing Data")
dataset_processor = PROCESSOR_BY_TASK[args.task.lower()]()
label_to_index = dataset_processor.get_label_to_index()
dev_dataset = dataset_processor.get_dev(args.dataset)
if len(dev_dataset) == 2:
# single sentence dataset
dev_dataset = convert_single_sentence(dev_dataset, label_to_index, tokenizer, args.max_sequence_length)
else:
# sentence pair dataset
dev_dataset = convert_sentence_pair(dev_dataset, label_to_index, tokenizer, args.max_sequence_length)
logger.info(f"Dev Dataset Size: {len(dev_dataset[0])}")
logger.info(f"Dev Batches: {get_total_batches(len(dev_dataset[0]), args.eval_batch_size)}")
dev_dataset = tf.data.Dataset.from_tensor_slices(dev_dataset).batch(args.eval_batch_size)
logger.info("Initialize model")
bert_config = models.BertConfig.from_json(args.config, use_splitted=True)
logger.info("Model Config")
for key, val in vars(bert_config).items():
logger.info(f" - {key}: {val}")
model = models.BertForClassification(bert_config, len(label_to_index))
assert bert_config.vocab_size == len(vocab), "Actual vocab size and that in bert config are different."
logger.info("Load Model Weights")
model.load_weights(args.model)
logger.info("Initialize Loss function")
criterion = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
eval_loss = tf.keras.metrics.Mean(name="eval_loss")
@tf.function
def eval_step(input_ids, token_type_ids, attention_mask, targets, head_mask):
preds, _ = model([input_ids, token_type_ids, attention_mask], head_mask=head_mask)
loss = criterion(targets, preds)
eval_loss.update_state(loss)
dataset_processor.update_state(targets, preds, validation=True)
def eval_dev(layer: int, head: int):
eval_loss.reset_states()
dataset_processor.reset_states(validation=True)
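# build a (1, num_layers, num_heads) mask that is 1.0 everywhere except the single (layer, head) entry being ablated, which becomes 0.0; eval_dev(-1, -1) masks nothing and gives the unmasked baseline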
head_mask = tf.constant(
[
[
[
head_index != head or layer_index != layer
for head_index in range(bert_config.num_attention_heads)
]
for layer_index in range(bert_config.num_hidden_layers)
]
],
dtype=tf.float32,
)
for targets, input_ids, token_type_ids, attention_mask in dev_dataset:
eval_step(input_ids, token_type_ids, attention_mask, targets, head_mask)
logger.info(
f"[Eval] "
f"masking layer {layer} head {head}, "
f"loss: {eval_loss.result():.4f}, "
+ ", ".join([f"{key}: {val}" for key, val in dataset_processor.get_metrics(validation=True).items()])
)
logger.info("Start Inference")
eval_dev(-1, -1)
for layer in range(bert_config.num_hidden_layers):
for head in range(bert_config.num_attention_heads):
eval_dev(layer, head)
```
#### File: bert-optimization/tools/run_early_exit.py
```python
import logging
import os
import sys
import tensorflow as tf
import tensorflow_addons as tfa
from bert_optimization import glue_processor, models, tokenizer, utils
from bert_optimization.glue_processor import convert_sentence_pair, convert_single_sentence
from bert_optimization.optimizer.scheduler import BertScheduler
PROCESSOR_BY_TASK = {
"cola": glue_processor.CoLAProcessor,
"mrpc": glue_processor.MRPCProcessor,
"mnli": glue_processor.MNLIProcessor,
"sst-2": glue_processor.SST2Processor,
"rte": glue_processor.RTEProcessor,
"qqp": glue_processor.QQPProcessor,
}
def get_total_batches(dataset_size, batch_size):
return dataset_size // batch_size + bool(dataset_size % batch_size)
@tf.function
def build_bert_model_graph(bert_model: models.EarlyExitBertModelForClassification, bert_config: models.BertConfig):
token_ids = tf.keras.Input((None,), dtype=tf.int32)
token_type_ids = tf.keras.Input((None,), dtype=tf.int32)
attention_mask = tf.keras.Input((None,), dtype=tf.float32)
bert_model([token_ids, token_type_ids, attention_mask], speed=0.7, training=True)
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(asctime)s: [%(levelname)s] %(message)s"))
logger.addHandler(handler)
parser = utils.get_default_bert_argument_parser()
args = parser.parse_args()
logger.info("Training Parameters")
for key, val in vars(args).items():
logger.info(f" - {key}: {val}")
assert args.task.lower() in PROCESSOR_BY_TASK, f"Supported Tasks: {', '.join(PROCESSOR_BY_TASK.keys())}"
assert os.path.exists(args.output), f"Output path {args.output} does not exists"
assert os.path.exists(args.model + ".index"), f"Model path {args.model} does not exists"
assert os.path.exists(args.config), f"Config path {args.config} does not exists"
assert os.path.exists(args.dataset), f"Dataset path {args.dataset} does not exists"
assert os.path.exists(args.vocab), f"Vocab path {args.vocab} does not exists"
vocab = tokenizer.Vocab(args.vocab)
tokenizer = tokenizer.SubWordTokenizer(vocab, args.do_lower_case)
logger.info("Processing Data")
dataset_processor = PROCESSOR_BY_TASK[args.task.lower()]()
label_to_index = dataset_processor.get_label_to_index()
train_dataset = dataset_processor.get_train(args.dataset)
dev_dataset = dataset_processor.get_dev(args.dataset)
if len(train_dataset) == 2:
# single sentence dataset
train_dataset = convert_single_sentence(train_dataset, label_to_index, tokenizer, args.max_sequence_length)
dev_dataset = convert_single_sentence(dev_dataset, label_to_index, tokenizer, args.max_sequence_length)
else:
# sentence pair dataset
train_dataset = convert_sentence_pair(train_dataset, label_to_index, tokenizer, args.max_sequence_length)
dev_dataset = convert_sentence_pair(dev_dataset, label_to_index, tokenizer, args.max_sequence_length)
logger.info(f"Train Dataset Size: {len(train_dataset[0])}")
logger.info(f"Dev Dataset Size: {len(dev_dataset[0])}")
train_batch_size = get_total_batches(len(train_dataset[0]), args.train_batch_size)
logger.info(f"Train Batches: {train_batch_size}")
logger.info(f"Dev Batches: {get_total_batches(len(dev_dataset[0]), args.eval_batch_size)}")
train_dataset = tf.data.Dataset.from_tensor_slices(train_dataset).shuffle(10000).batch(args.train_batch_size)
dev_dataset = tf.data.Dataset.from_tensor_slices(dev_dataset).batch(args.eval_batch_size)
logger.info("Initialize model")
bert_config = models.BertConfig.from_json(args.config, aware_quantization=args.aware_quantization)
logger.info("Model Config")
for key, val in vars(bert_config).items():
logger.info(f" - {key}: {val}")
model = models.EarlyExitBertModelForClassification(bert_config, len(label_to_index))
assert bert_config.vocab_size == len(vocab), "Actual vocab size and that in bert config are different."
logger.info("Load Model Weights")
build_bert_model_graph(model, bert_config)
utils.load_bert_weights(args.model, model, bert_config.use_splitted)
logger.info("Initialize Optimizer and Loss function")
global_step = tf.Variable(0.0, trainable=False)
scheduler = BertScheduler(args.warmup_ratio, train_batch_size * args.epoch)
learning_rate = lambda: args.learning_rate * scheduler(global_step)
weight_decay = lambda: args.weight_decay * args.learning_rate * scheduler(global_step)
optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay, epsilon=1e-06)
excludes = ["layer_norm", "LayerNorm", "bias"]
decay_var_list = [v for v in model.trainable_variables if all(term not in v.name for term in excludes)]
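# exclude LayerNorm and bias parameters from weight decay, following the usual BERT fine-tuning recipe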
criterion = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
criterion_self = tf.keras.losses.CategoricalCrossentropy(from_logits=False)
train_loss = tf.keras.metrics.Mean(name="train_loss")
eval_loss = tf.keras.metrics.Mean(name="eval_loss")
best_model_score = 0.0
@tf.function
def train_step(input_ids, token_type_ids, attention_mask, targets):
with tf.GradientTape() as tape:
preds = model([input_ids, token_type_ids, attention_mask], training=True)
loss = sum([((len(preds) - index) ** 0.5) * criterion(targets, pred) for index, pred in enumerate(preds)])
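# each exit's loss is weighted by sqrt(len(preds) - index), so earlier (lower) exits contribute more heavily to the total loss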
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables), decay_var_list=decay_var_list)
train_loss.update_state(loss)
dataset_processor.update_state(targets, preds[-1])
@tf.function
def eval_step(input_ids, token_type_ids, attention_mask, targets):
preds = model([input_ids, token_type_ids, attention_mask], speed=0.0)
loss = criterion(targets, preds[1])
eval_loss.update_state(loss)
dataset_processor.update_state(targets, preds[1], validation=True)
def eval_dev(best_model_score):
eval_loss.reset_states()
dataset_processor.reset_states(validation=True)
for targets, input_ids, token_type_ids, attention_mask in dev_dataset:
eval_step(input_ids, token_type_ids, attention_mask, targets)
logger.info(
f"[Eval] Epoch {epoch_index + 1} "
f"loss: {eval_loss.result()}, "
+ ", ".join([f"{key}: {val}" for key, val in dataset_processor.get_metrics(validation=True).items()])
)
if dataset_processor.get_key() > best_model_score:
logger.info("Reached Best Score.")
model_path = f"{args.output}/checkpoints/model-{args.task}-{dataset_processor.get_hash()}-epoch{epoch_index}-step{int(global_step.numpy()) + 1}"
model.save_weights(model_path)
logger.info(f"Saved model in {model_path}")
best_model_score = dataset_processor.get_key()
return best_model_score
logger.info("Start Training")
for epoch_index in range(args.epoch):
for step, (targets, input_ids, token_type_ids, attention_mask) in enumerate(train_dataset):
train_step(input_ids, token_type_ids, attention_mask, targets)
if (step + 1) % args.log_interval == 0:
logger.info(
f"Epoch {epoch_index + 1} "
f"step: {step + 1}, "
f"loss: {train_loss.result()}, "
+ ", ".join([f"{key}: {val}" for key, val in dataset_processor.get_metrics().items()])
)
train_loss.reset_states()
dataset_processor.reset_states()
if (step + 1) % args.val_interval == 0:
best_model_score = eval_dev(best_model_score)
global_step.assign_add(1.0)
logger.info(
f"Epoch {epoch_index + 1} "
f"loss: {train_loss.result()}, "
+ ", ".join([f"{key}: {val}" for key, val in dataset_processor.get_metrics().items()])
)
train_loss.reset_states()
dataset_processor.reset_states()
best_model_score = eval_dev(best_model_score)
```
|
{
"source": "jeongukjae/branching-entropy-with-pos-tagger",
"score": 2
}
|
#### File: jeongukjae/branching-entropy-with-pos-tagger/extract.py
```python
import csv
import glob
import math
import os
from collections import defaultdict
from multiprocessing import Pool
from typing import Generator, List, Tuple
import nori
from absl import app, flags, logging
from nori import Dictionary, NoriTokenizer
from tqdm import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string("corpus", "corpus", help="corpus directory")
flags.DEFINE_string("left_output", "entropy-table-left.csv", help="left output")
flags.DEFINE_string("right_output", "entropy-table-right.csv", help="right output")
flags.DEFINE_integer("max_rows", 1000, "max rows")
flags.DEFINE_integer("n_files", 1, "n_files")
SUFFIX_INDICATOR = "##"
dictionary_path = os.path.join(
os.path.dirname(nori.__file__),
"dictionary",
"latest-dictionary.nori",
)
logging.debug(f"dictionary path: {dictionary_path}")
dictionary = Dictionary()
dictionary.load_prebuilt_dictionary(dictionary_path)
tokenizer = NoriTokenizer(dictionary)
def main(argv):
files = glob.glob(os.path.join(FLAGS.corpus, "*"))[: FLAGS.n_files]
logging.info(f"Found {len(files)} files, files[:3]: {files[:3]}")
left_side_freq = defaultdict(lambda: defaultdict(int))
right_side_freq = defaultdict(lambda: defaultdict(int))
with Pool() as pool:
for filename in tqdm(files, position=0, desc="file"):
for left_results, right_results in pool.imap_unordered(
_tokenize_and_add_space_info,
tqdm(_read_file(filename), position=1, desc="line"),
chunksize=1_000,
):
for key, token in left_results:
left_side_freq[key][token] += 1
for key, token in right_results:
right_side_freq[key][token] += 1
def _dump_entropy(freq_dictionary, filename):
rows = []
for term, occurances in tqdm(
freq_dictionary.items(),
desc="calculate entropy...",
):
n_total = sum(v for _, v in occurances.items())
if n_total == 1 or len(occurances) == 1:
continue
if " " not in term:
continue
entropy = 0.0
for _, v in occurances.items():
freq = v / float(n_total)
entropy -= freq * math.log(freq)
rows.append({"term": term, "entropy": entropy})
rows = sorted(rows, key=lambda x: x["entropy"], reverse=True)
with open(filename, "w") as f:
writer = csv.DictWriter(f, fieldnames=["term", "entropy"])
writer.writeheader()
writer.writerows(rows[: FLAGS.max_rows])
_dump_entropy(left_side_freq, FLAGS.left_output)
_dump_entropy(right_side_freq, FLAGS.right_output)
def _read_file(filename: str) -> Generator[str, None, None]:
"""Read file content of given filename
This function will skip empty line
Returns:
Generator object that yield each line
"""
with open(filename) as f:
for line in f:
line = line.strip()
if not line:
continue
yield line
def _tokenize_and_add_space_info(
line: str,
) -> Tuple[List[List[Tuple[str, str]]], List[List[Tuple[str, str]]]]:
"""tokenize line and return tokens like BertTokenizer
Example:
word branching (original line)
-> wo rd branch ing (tokenization output)
-> wo ##rd branch ##ing (output of this function)
"""
tokens = tokenizer.tokenize(line).tokens[1:-1] # remove BOS/EOS
next_offset = -1
results = []
for token in tokens:
if next_offset != token.offset:
results.append([(token.surface, token)])
else:
results[-1].append((SUFFIX_INDICATOR + token.surface, token))
next_offset = token.offset + token.length
left_results = []
right_results = []
for tokens in results:
num_tokens = len(tokens)
if num_tokens == 1:
continue
for i in range(1, num_tokens):
if any(p.startswith("S") for token in tokens[:i] for p in token[1].postag):
continue
block_list = {"E", "J"}
if any(p in block_list for p in tokens[i - 1][1].postag):
continue
key = " ".join(t[0] for t in tokens[:i])
left_results.append((key, tokens[i][0]))
for i in range(1, num_tokens):
if any(p.startswith("S") for token in tokens[-i:] for p in token[1].postag):
continue
key = " ".join(t[0] for t in tokens[-i:])
right_results.append((key, tokens[-i - 1][0]))
return left_results, right_results
if __name__ == "__main__":
app.run(main)
```
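The `_dump_entropy` helper above computes the branching entropy of each key from its follower counts. As a quick sanity check, the same formula applied to a toy frequency table (the numbers are made up for illustration) looks like this:
```python
import math

# Toy follower counts for one key, mirroring freq_dictionary[term] above.
occurrences = {"##ing": 4, "##ed": 3, "##s": 3}
n_total = sum(occurrences.values())
entropy = -sum((v / n_total) * math.log(v / n_total) for v in occurrences.values())
print(round(entropy, 4))  # ~1.0889 nats: higher entropy = more diverse continuations
```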
|
{
"source": "jeongukjae/flask-compressed",
"score": 3
}
|
#### File: flask-compressed/flask_compressed/compression.py
```python
import gzip
import zlib
class Compression:
_encoding_name = ''
@staticmethod
def compress(content, **kwargs):
raise NotImplementedError
@staticmethod
def decompress(compressed, **kwargs):
raise NotImplementedError
class Gzip(Compression):
_encoding_name = 'gzip'
@staticmethod
    def compress(content, level=5):
        if not isinstance(content, bytes):
            raise ValueError("content must be bytes literal")
        # pass the requested compression level through instead of silently ignoring it
        return gzip.compress(content, compresslevel=level)
@staticmethod
def decompress(compressed):
if not isinstance(compressed, bytes):
raise ValueError("content must be bytes literal")
return gzip.decompress(compressed)
class Deflate(Compression):
_encoding_name = 'deflate'
@staticmethod
def compress(content):
if not isinstance(content, bytes):
raise ValueError("content must be bytes literal")
return zlib.compress(content)
@staticmethod
def decompress(compressed):
if not isinstance(compressed, bytes):
raise ValueError("content must be bytes literal")
return zlib.decompress(compressed)
```
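A quick round-trip with the two codecs above (assuming the package layout matches the file path, i.e. the module is importable as `flask_compressed.compression`):
```python
from flask_compressed.compression import Deflate, Gzip

payload = b'{"message": "hello"}'
assert Gzip.decompress(Gzip.compress(payload)) == payload
assert Deflate.decompress(Deflate.compress(payload)) == payload
```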
#### File: flask-compressed/tests/test_app.py
```python
import gzip
import zlib
import json
import pytest
from flask import Flask, g
from flask_compressed import FlaskCompressed, compress_as_gzip
def test_flask_compressed():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
# attached to flask app's hooks
assert len(flask_app.before_request_funcs) == 1
def test_send_gzip():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
flask_app.config['TESTING'] = True
flask_app.route('/', methods=['POST'])(lambda: g.body)
with open('./tests/data/test1.json', 'r') as f:
original_data = f.read().encode('utf8')
compressed_data = gzip.compress(original_data)
with flask_app.test_client() as client:
response = client.post(
'/',
data=compressed_data,
headers=dict({'Content-Encoding': 'gzip'}))
assert response.data == original_data
def test_send_zlib():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
flask_app.config['TESTING'] = True
flask_app.route('/', methods=['POST'])(lambda: g.body)
with open('./tests/data/test1.json', 'r') as f:
original_data = f.read().encode('utf8')
compressed_data = zlib.compress(original_data)
with flask_app.test_client() as client:
response = client.post(
'/',
data=compressed_data,
headers=dict({'Content-Encoding': 'deflate'}))
assert response.data == original_data
def test_set_unsupported_encodings():
flask_app = Flask(__name__)
with pytest.raises(ValueError):
        FlaskCompressed(flask_app, encodings=('some-encoding',))
def test_send_unsupported_encodings():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
@flask_app.route('/')
def echo():
return g.body
with flask_app.test_client() as client:
rv = client.post(
'/',
headers={'Content-Encoding': 'gzip, unsupported, deflate'},
data=b'some-data')
assert rv.status_code == 500
def test_send_multiple():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
flask_app.config['TESTING'] = True
flask_app.route('/', methods=['POST'])(lambda: g.body)
with open('./tests/data/test1.json', 'r') as f:
original_data = f.read().encode('utf8')
compressed_data = zlib.compress(original_data)
compressed_data = gzip.compress(compressed_data)
with flask_app.test_client() as client:
response = client.post(
'/',
data=compressed_data,
headers=dict({'Content-Encoding': 'deflate, gzip'}))
assert response.data == original_data
def test_response():
flask_app = Flask(__name__)
FlaskCompressed(flask_app)
messages = json.dumps({'message': 'hello'})
@flask_app.route('/')
@compress_as_gzip
def index():
return messages, 200
with flask_app.test_client() as client:
response = client.get('/')
assert messages.encode('utf8') == gzip.decompress(response.get_data())
```
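The `test_send_multiple` case above applies `zlib` first and `gzip` second, then declares `Content-Encoding: deflate, gzip`; per the header semantics the encodings must be undone in reverse order, which a minimal sketch (independent of Flask) makes explicit:
```python
import gzip
import zlib

original = b'{"key": "value"}'
body = gzip.compress(zlib.compress(original))           # applied left-to-right: deflate, then gzip
decoders = {"gzip": gzip.decompress, "deflate": zlib.decompress}
for encoding in reversed("deflate, gzip".split(", ")):  # undo right-to-left
    body = decoders[encoding](body)
assert body == original
```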
|
{
"source": "jeongukjae/kcbert-tf",
"score": 2
}
|
#### File: jeongukjae/kcbert-tf/convert_base.py
```python
import os
import numpy as np
import tensorflow as tf
import tensorflow_text as text
import torch
from transformers import AutoTokenizer, BertModel as TorchBertModel
from model import BertConfig, BertModel
tokenizer = AutoTokenizer.from_pretrained("beomi/kcbert-base")
torch_model = TorchBertModel.from_pretrained("beomi/kcbert-base").eval()
if not os.path.isdir("kcbert-base"):
os.mkdir("kcbert-base")
tokenizer.save_vocabulary("kcbert-base")
config = BertConfig(vocab_size=30000)
model = BertModel(config)
model(
{
"input_word_ids": tf.keras.Input(shape=[None], dtype=tf.int64),
"input_mask": tf.keras.Input(shape=[None], dtype=tf.int64),
"input_type_ids": tf.keras.Input(shape=[None], dtype=tf.int64),
}
)
sd = torch_model.state_dict()
model.token_embeddings.set_weights([sd["embeddings.word_embeddings.weight"].numpy()])
model.position_embeddings.set_weights([sd["embeddings.position_embeddings.weight"].numpy()])
model.token_type_embeddings.set_weights([sd["embeddings.token_type_embeddings.weight"].numpy()])
model.embedding_layer_norm.set_weights([sd["embeddings.LayerNorm.weight"], sd["embeddings.LayerNorm.bias"]])
for i in range(config.num_hidden_layers):
qkv_weight = np.concatenate([
sd[f"encoder.layer.{i}.attention.self.query.weight"].T,
sd[f"encoder.layer.{i}.attention.self.key.weight"].T,
sd[f"encoder.layer.{i}.attention.self.value.weight"].T,
], axis=1)
qkv_bias = np.concatenate([
sd[f"encoder.layer.{i}.attention.self.query.bias"],
sd[f"encoder.layer.{i}.attention.self.key.bias"],
sd[f"encoder.layer.{i}.attention.self.value.bias"],
], axis=0)
model.encoders[i].qkv.set_weights([qkv_weight, qkv_bias])
model.encoders[i].attention_dense.set_weights([
sd[f"encoder.layer.{i}.attention.output.dense.weight"].T,
sd[f"encoder.layer.{i}.attention.output.dense.bias"],
])
model.encoders[i].attention_layer_norm.set_weights([
sd[f'encoder.layer.{i}.attention.output.LayerNorm.weight'],
sd[f'encoder.layer.{i}.attention.output.LayerNorm.bias'],
])
model.encoders[i].intermediate_dense.set_weights([
sd[f'encoder.layer.{i}.intermediate.dense.weight'].T,
sd[f'encoder.layer.{i}.intermediate.dense.bias'],
])
model.encoders[i].intermediate_dense2.set_weights([
sd[f'encoder.layer.{i}.output.dense.weight'].T,
sd[f'encoder.layer.{i}.output.dense.bias'],
])
model.encoders[i].intermediate_layer_norm.set_weights([
sd[f'encoder.layer.{i}.output.LayerNorm.weight'],
sd[f'encoder.layer.{i}.output.LayerNorm.bias'],
])
model.pooler_layer.set_weights([
sd['pooler.dense.weight'].T,
sd['pooler.dense.bias'],
])
tf.saved_model.save(model, 'kcbert-base/model/0')
to_export = tf.Module()
tokenizer = text.BertTokenizer('./kcbert-base/vocab.txt')
cls_id = 2
sep_id = 3
@tf.function(input_signature=[tf.TensorSpec([None], tf.string), tf.TensorSpec([], tf.int32)])
def call(input_tensor, seq_length):
batch_size = tf.shape(input_tensor)[0]
def _parse_single_sentence(x):
return tf.concat([[cls_id], x[: seq_length - 2], [sep_id]], axis=0)
tokenized = tokenizer.tokenize(input_tensor)
tokenized = tokenized.merge_dims(1, 2)
input_word_ids = tf.map_fn(
_parse_single_sentence,
tokenized,
fn_output_signature=tf.RaggedTensorSpec([None], tf.int64),
)
input_mask = tf.ones_like(input_word_ids, dtype=tf.int64)
input_type_ids = tf.zeros_like(input_word_ids, dtype=tf.int64)
return {
"input_word_ids": input_word_ids.to_tensor(shape=[batch_size, seq_length]),
"input_mask": input_mask.to_tensor(shape=[batch_size, seq_length]),
"input_type_ids": input_type_ids.to_tensor(shape=[batch_size, seq_length]),
}
@tf.function(input_signature=[[tf.TensorSpec([None], tf.string), tf.TensorSpec([None], tf.string)], tf.TensorSpec([], tf.int32)])
def call_2(input_tensor, seq_length):
segment_a = input_tensor[0]
segment_b = input_tensor[1]
batch_size = tf.shape(segment_a)[0]
segment_a = tokenizer.tokenize(segment_a)
segment_b = tokenizer.tokenize(segment_b)
segment_a = segment_a.merge_dims(1, 2)
segment_b = segment_b.merge_dims(1, 2)
def _parse_single_sentence(x):
a = x[0]
b = x[1]
a_len = tf.minimum(tf.size(a), seq_length - 3 - tf.size(b))
input_word_ids = tf.concat(
[[cls_id], a[:a_len], [sep_id], b, [sep_id]], axis=0
)
input_mask = tf.ones_like(input_word_ids, dtype=tf.int64)
input_type_ids = tf.ragged.row_splits_to_segment_ids(
[0, a_len + 2, tf.size(input_word_ids)]
)
return input_word_ids, input_mask, input_type_ids
input_word_ids, input_mask, input_type_ids = tf.map_fn(
_parse_single_sentence,
[segment_a, segment_b],
fn_output_signature=(
tf.RaggedTensorSpec([None], tf.int64),
tf.RaggedTensorSpec([None], tf.int64),
tf.RaggedTensorSpec([None], tf.int64),
),
)
return {
"input_word_ids": input_word_ids.to_tensor(shape=[batch_size, seq_length]),
"input_mask": input_mask.to_tensor(shape=[batch_size, seq_length]),
"input_type_ids": input_type_ids.to_tensor(shape=[batch_size, seq_length]),
}
to_export.__call__ = call
to_export.call_2 = tf.Module()
to_export.call_2.__call__ = call_2
to_export.call_2.tokenizer = tokenizer
to_export.call_2.sep_id = sep_id
to_export.call_2.cls_id = cls_id
tf.saved_model.save(to_export, 'kcbert-base/preprocess/0')
```
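After the script above finishes, the two exported SavedModels can be loaded back and chained. A minimal sketch follows; the sample sentence is arbitrary, and the exact calling convention of the restored objects can vary slightly across TensorFlow versions:
```python
import tensorflow as tf
import tensorflow_text as text  # noqa: F401  # registers the BertTokenizer ops used by the preprocess model

preprocess = tf.saved_model.load("kcbert-base/preprocess/0")
encoder = tf.saved_model.load("kcbert-base/model/0")

features = preprocess(tf.constant(["샘플 문장입니다."]), tf.constant(128, dtype=tf.int32))
print(features["input_word_ids"].shape)  # (1, 128)

outputs = encoder(features)  # output structure depends on BertModel in model.py
```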
|
{
"source": "jeongukjae/korean-spacing-model",
"score": 2
}
|
#### File: jeongukjae/korean-spacing-model/benchmark.py
```python
import json
import time
from argparse import ArgumentParser
import tensorflow as tf
from train import SpacingModel
parser = ArgumentParser()
parser.add_argument("--training-config", type=str, required=True)
parser.add_argument("--batch-size", type=int, required=True)
parser.add_argument("--sequence-length", type=int, required=True)
args = parser.parse_args()
with open(args.training_config) as f:
config = json.load(f)
model = SpacingModel(
config["vocab_size"],
config["hidden_size"],
conv_activation=config["conv_activation"],
dense_activation=config["dense_activation"],
conv_kernel_and_filter_sizes=config["conv_kernel_and_filter_sizes"],
dropout_rate=config["dropout_rate"],
)
model(tf.keras.Input([None], dtype=tf.int32))
@tf.function
def run(batch):
return model(batch)
print("Warmup stage (10 iteration)")
for _ in range(10):
run(tf.random.uniform((args.batch_size, args.sequence_length), maxval=config["vocab_size"], dtype=tf.int32))
print("Benchmark model speed with random input (1000 iteration)")
s = time.time()
for _ in range(1000):
run(tf.random.uniform((args.batch_size, args.sequence_length), maxval=config["vocab_size"], dtype=tf.int32))
elapsed = time.time() - s
print("Elapsed:", elapsed, "s")
print("Per batch:", elapsed / 1000, "s")
print("Per sentence:", elapsed / 1000 / args.batch_size, "s")
```
|
{
"source": "jeongukjae/KR-BERT-SimCSE",
"score": 2
}
|
#### File: jeongukjae/KR-BERT-SimCSE/evaluate_korsts.py
```python
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_text as text
import tfds_korean.korsts # noqa
from absl import app, flags, logging
from scipy import stats
from model import BertConfig, BertModel, CosineSimilarity
from train_unsupervised import get_single_bert_input
FLAGS = flags.FLAGS
def def_flags():
flags.DEFINE_string("config", "./configs/char_bert_base.json", help="bert config")
flags.DEFINE_string("weight", "", help="bert weight")
flags.DEFINE_string("vocab_path", "vocabs/vocab_char_16424.txt", help="Vocab path")
flags.DEFINE_string("split", "dev", help="split name")
flags.DEFINE_integer("batch_size", 64, help="batch size")
flags.DEFINE_integer("max_sequence_length", 64, help="max sequence length")
def main(argv):
tokenizer = text.BertTokenizer(FLAGS.vocab_path, unknown_token="[UNK]")
pad_id = tokenizer._wordpiece_tokenizer._vocab_lookup_table.lookup(tf.constant("[PAD]"))
cls_id = tokenizer._wordpiece_tokenizer._vocab_lookup_table.lookup(tf.constant("[CLS]"))
sep_id = tokenizer._wordpiece_tokenizer._vocab_lookup_table.lookup(tf.constant("[SEP]"))
bert_config = BertConfig.from_json(FLAGS.config)
bert_model = BertModel(bert_config, name="bert_model")
logging.info(f"Load weights from {FLAGS.weight}")
bert_model.load_weights(FLAGS.weight)
dataset = tfds.load("korsts", split=FLAGS.split).batch(FLAGS.batch_size)
bert_input_fn = get_single_bert_input(
tokenizer=tokenizer,
pad_id=pad_id,
cls_id=cls_id,
sep_id=sep_id,
max_sequence_length=FLAGS.max_sequence_length,
)
@tf.function
def calculate_similarity(sentence1, sentence2):
representation1 = bert_model(bert_input_fn(sentence1))["sequence_output"][:, 0]
representation2 = bert_model(bert_input_fn(sentence2))["sequence_output"][:, 0]
return CosineSimilarity()([representation1, representation2])
label_score = []
pred_score = []
for item in dataset:
label_score.append(item["score"])
pred_score.append(calculate_similarity(item["sentence1"], item["sentence2"]))
label_score = tf.concat(label_score, axis=0)
pred_score = tf.concat(pred_score, axis=0)
print(stats.spearmanr(label_score, pred_score))
if __name__ == "__main__":
def_flags()
app.run(main)
```
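The script reports Spearman's rank correlation between the gold KorSTS scores and the predicted cosine similarities. On toy data, the `scipy.stats` call it relies on behaves like this:
```python
from scipy import stats

gold_scores = [0.0, 1.2, 2.5, 3.8, 5.0]
cosine_similarities = [0.11, 0.25, 0.48, 0.60, 0.95]

# Perfectly monotone predictions give a rank correlation of 1.0,
# regardless of the absolute scale difference between the two lists.
print(stats.spearmanr(gold_scores, cosine_similarities))
```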
|
{
"source": "jeongukjae/module-tracker.py",
"score": 3
}
|
#### File: module-tracker.py/module_tracker/analyzer.py
```python
import ast
from typing import List, Type, Union, cast
ImportType = Type[Union[ast.Import, ast.ImportFrom]]
def parse_file(file_name: str) -> List[str]:
with open(file_name) as f:
return parse_file_content(f.read())
def parse_file_content(file_content: str) -> List[str]:
parsed_file = ast.parse(file_content)
return [
import_name
for node in ast.walk(parsed_file)
if _is_import_statement(node)
for import_name in _get_imported_modules(node)
]
def _is_import_statement(node: ast.AST) -> bool:
return isinstance(node, (ast.Import, ast.ImportFrom))
def _get_imported_modules(node: ast.AST):
import_statement = cast(ImportType, node)
if isinstance(import_statement, ast.Import):
return _get_modules_from_import_statement(import_statement)
elif isinstance(import_statement, ast.ImportFrom):
return _get_modules_from_import_from_statement(import_statement)
return []
def _get_modules_from_import_statement(import_statement):
return [_normalized_name(alias.name) for alias in import_statement.names]
def _get_modules_from_import_from_statement(import_from_statement):
return [_normalized_name(import_from_statement.module)] if import_from_statement.level == 0 else []
def _normalized_name(import_name: str) -> str:
return import_name.split(".")[0]
```
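A short usage example for the analyzer above: absolute imports are reduced to their top-level package, and relative imports are ignored.
```python
from module_tracker.analyzer import parse_file_content

source = (
    "import os.path\n"
    "from collections import OrderedDict\n"
    "from .local import helper\n"
)
print(parse_file_content(source))  # ['os', 'collections']
```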
#### File: module-tracker.py/tests/test_analyzer.py
```python
import pytest
from module_tracker.analyzer import parse_file_content
@pytest.mark.parametrize(
"file_content,result",
[
pytest.param("import ast", ["ast"]),
pytest.param("from ast import AST", ["ast"]),
pytest.param("import ast as a", ["ast"]),
pytest.param("import ast, time, os", ["ast", "time", "os"]),
pytest.param("from .some.relative import kk", []),
pytest.param("from ...some.relative import kk", []),
],
)
def test_analyze_import_statement(file_content, result):
assert parse_file_content(file_content) == result
```
|
{
"source": "jeongukjae/multilingual-bert-hate-speech-detection",
"score": 3
}
|
#### File: jeongukjae/multilingual-bert-hate-speech-detection/training.py
```python
import csv
import os
import copy
import random
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_addons as tfa
import tensorflow_text as text
from tqdm import tqdm
datasets = [filename for filename in os.listdir(".") if filename.endswith(".csv")]
dataset_file_to_lang = {
"multilingual_fairness_lrec_English.csv": "english",
"hate_speech_mlma_en_dataset_with_stop_words.csv": "english",
"multilingual_fairness_lrec_Spanish.csv": "spanish",
"hate_speech_mlma_fr_dataset.csv": "france",
"hate_speech_mlma_ar_dataset.csv": "arabic",
"hate_sonar.csv": "english",
"multilingual_fairness_lrec_Italian.csv": "itailian",
"multilingual_fairness_lrec_Portuguese.csv": "portuguese",
"korean_hate_speech.csv": "korean",
"korean-malicious-comments.csv": "korean",
"hate_speech_mlma_en_dataset.csv": "english",
"multilingual_fairness_lrec_Polish.csv": "polish",
}
print(set(dataset_file_to_lang.values()))
dataset_by_lang = {
"polish": [],
"korean": [],
"portuguese": [],
"arabic": [],
"france": [],
"spanish": [],
"itailian": [],
"english": [],
}
for filename, lang in dataset_file_to_lang.items():
with open(filename, encoding="utf8") as f:
dataset_by_lang[lang].extend(
[
(line[0], int(line[1])) if len(line) == 2 else ((line[0], line[1]), int(line[2]))
for line in csv.reader(f)
]
)
train_dataset_by_lang = {
"polish": [],
"korean": [],
"portuguese": [],
"arabic": [],
"france": [],
"spanish": [],
"itailian": [],
"english": [],
}
dev_dataset_by_lang = copy.deepcopy(train_dataset_by_lang)
for lang in dataset_by_lang.keys():
random.shuffle(dataset_by_lang[lang])
length = len(dataset_by_lang[lang])
dev_length = int(length * 0.1)
dev_dataset_by_lang[lang] = dataset_by_lang[lang][:dev_length]
train_dataset_by_lang[lang] = dataset_by_lang[lang][dev_length:]
for lang in dataset_by_lang.keys():
print(lang)
print(" train: " + str(len(train_dataset_by_lang[lang])))
print(" dev: " + str(len(dev_dataset_by_lang[lang])))
train_set = [data for data_by_lang in train_dataset_by_lang.values() for data in data_by_lang]
dev_set = [data for data_by_lang in dev_dataset_by_lang.values() for data in data_by_lang]
print(len(train_set), len(dev_set))
preprocessor = hub.load("https://tfhub.dev/tensorflow/bert_multi_cased_preprocess/2")
tokenize = hub.KerasLayer(preprocessor.tokenize)
bert_pack_inputs = hub.KerasLayer(preprocessor.bert_pack_inputs, arguments=dict(seq_length=128))
single_bert_input = hub.KerasLayer(preprocessor)
def take_first(item):
return {
"input_mask": item["input_mask"][0],
"input_type_ids": item["input_type_ids"][0],
"input_word_ids": item["input_word_ids"][0],
}
@tf.function(input_signature=[tf.TensorSpec([], dtype=tf.string), tf.TensorSpec([], dtype=tf.int32)])
def parse_single(item, label):
return [take_first(single_bert_input([item])), tf.one_hot(label, 2)]
@tf.function(
input_signature=[
tf.TensorSpec([], dtype=tf.string),
tf.TensorSpec([], dtype=tf.string),
tf.TensorSpec([], dtype=tf.int32),
]
)
def parse_multi(item1, item2, label):
return [take_first(bert_pack_inputs([tokenize([item1]), tokenize([item2])])), tf.one_hot(label, 2)]
train_tensor_set = [
(parse_single(item[0], item[1]) if not isinstance(item[0], tuple) else parse_multi(item[0][0], item[0][1], item[1]))
for item in tqdm(train_set)
]
dev_tensor_set = [
(parse_single(item[0], item[1]) if not isinstance(item[0], tuple) else parse_multi(item[0][0], item[0][1], item[1]))
for item in tqdm(dev_set)
]
train_set = tf.data.Dataset.zip(
(
tf.data.Dataset.from_tensor_slices(
{
"input_mask": [i[0]["input_mask"] for i in train_tensor_set],
"input_type_ids": [i[0]["input_type_ids"] for i in train_tensor_set],
"input_word_ids": [i[0]["input_word_ids"] for i in train_tensor_set],
}
),
tf.data.Dataset.from_tensor_slices([i[1] for i in train_tensor_set]),
)
)
print(train_set.element_spec)
dev_set = tf.data.Dataset.zip(
(
tf.data.Dataset.from_tensor_slices(
{
"input_mask": [i[0]["input_mask"] for i in dev_tensor_set],
"input_type_ids": [i[0]["input_type_ids"] for i in dev_tensor_set],
"input_word_ids": [i[0]["input_word_ids"] for i in dev_tensor_set],
}
),
tf.data.Dataset.from_tensor_slices([i[1] for i in dev_tensor_set]),
)
)
print(dev_set.element_spec)
encoder = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3", trainable=True)
def create_model():
input_node = {
"input_mask": tf.keras.Input([None], dtype=tf.int32),
"input_type_ids": tf.keras.Input([None], dtype=tf.int32),
"input_word_ids": tf.keras.Input([None], dtype=tf.int32),
}
encoder_outputs = encoder(input_node)["pooled_output"]
output_prob = tf.keras.layers.Dense(2, activation="softmax", name="output")(encoder_outputs)
model = tf.keras.Model(input_node, output_prob)
return model
train_set = train_set.shuffle(len(train_tensor_set), reshuffle_each_iteration=True).batch(32, drop_remainder=True)
dev_set = dev_set.batch(64)
print("total example: " + str(len(train_tensor_set)))
print("step per epoch: " + str(len(train_tensor_set) // 32))
model = create_model()
model.compile(
optimizer=tf.keras.optimizers.Adamax(
learning_rate=tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=5e-5,
decay_steps=100,
decay_rate=0.97,
)
),
loss=tf.keras.losses.CategoricalCrossentropy(),
metrics=["acc", tfa.metrics.F1Score(num_classes=2, name="f1")],
)
model.fit(
train_set,
epochs=3,
validation_data=dev_set,
callbacks=[
tf.keras.callbacks.ModelCheckpoint("./models/weights.epoch-{epoch:02d}", verbose=1),
tf.keras.callbacks.TensorBoard("./logs", update_freq="batch"),
],
)
```
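A hedged inference sketch that could be appended to the script above once training has produced a checkpoint; the checkpoint name follows the `ModelCheckpoint` pattern used in `model.fit`, and the class order of the output probabilities is an assumption about the label encoding in the CSV files.
```python
# Reuses `create_model` and `single_bert_input` defined above.
inference_model = create_model()
inference_model.load_weights("./models/weights.epoch-03")

features = single_bert_input(tf.constant(["this is an example sentence"]))
probs = inference_model.predict(features)
print(probs)  # shape (1, 2); index 1 assumed to be the hate-speech class
```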
|
{
"source": "jeongukjae/nori-clone",
"score": 3
}
|
#### File: tools/benchmark/nori_clone_runner.py
```python
import time
from sys import argv, stdin
import nori
dictionary = nori.Dictionary()
dictionary.load_prebuilt_dictionary("./dictionary/latest-dictionary.nori")
dictionary.load_user_dictionary("./dictionary/latest-userdict.txt")
tokenizer = nori.NoriTokenizer(dictionary)
def run_with_iterator(f):
for line in f:
tokenizer.tokenize(line)
if len(argv) != 1:
# read all lines from the input file
with open(argv[1]) as f:
lines = f.readlines()[:int(argv[2])]
start_time = time.time()
run_with_iterator(lines)
else:
start_time = time.time()
run_with_iterator(stdin)
end_time = time.time()
time_diff = (end_time - start_time)
print(int(time_diff * 1000))
```
#### File: tools/comparison/nori_clone_runner.py
```python
import time
from sys import argv, stdin
import nori
dictionary = nori.Dictionary()
dictionary.load_prebuilt_dictionary("./dictionary/legacy-dictionary.nori")
dictionary.load_user_dictionary("./dictionary/legacy-userdict.txt")
tokenizer = nori.NoriTokenizer(dictionary)
def run_with_iterator(f):
for line in f:
result = tokenizer.tokenize(line)
print(line.rstrip())
for token in result.tokens[1:-1]:
print(f"{token.surface}, {token.postype}, {token.postag[0]}, {token.postag[-1]}")
print()
if len(argv) != 1:
# read all lines from the input file
with open(argv[1]) as f:
lines = f.readlines()
start_time = time.time()
run_with_iterator(lines)
else:
start_time = time.time()
run_with_iterator(stdin)
end_time = time.time()
time_diff = (end_time - start_time)
print("Elapsed time:", time_diff * 1000, "ms")
```
#### File: lint/google_java_format/def.bzl
```python
load("@bazel_skylib//lib:shell.bzl", "shell")
def _google_java_format_impl_factory(ctx, test_rule = False):
args = ["--aosp"]
if test_rule:
args.append("--set-exit-if-changed")
else:
args.append("--replace")
runner_files = depset(ctx.files._runner).to_list()
if len(runner_files) != 1:
fail("length _runner's file should be 1")
jar_file = runner_files[0]
executable = ctx.actions.declare_file(ctx.attr.name + ".bash")
exclude_patterns_str = " ".join(["\\! -path %s" % shell.quote(pattern) for pattern in ctx.attr.exclude_patterns])
substitutions = {
"@@ARGS@@": shell.array_literal(args),
"@@JAR@@": shell.quote(jar_file.short_path),
"@@EXCLUDE_PATTERNS@@": exclude_patterns_str,
}
ctx.actions.expand_template(
template = ctx.file._template,
output = executable,
substitutions = substitutions,
is_executable = True,
)
runfiles = [jar_file]
if test_rule:
runfiles.extend(ctx.files.srcs)
return DefaultInfo(
files = depset([executable]),
executable = executable,
runfiles = ctx.runfiles(files = runfiles),
)
def _get_attrs_for_google_java_format(test_rule = False):
attrs = {
"exclude_patterns": attr.string_list(allow_empty = True),
"_runner": attr.label(
default = "@com_github_google_google_java_format//jar",
cfg = "host",
),
"_template": attr.label(
default = "//tools/lint/google_java_format:google_java_format.template.bash",
allow_single_file = True,
),
}
if test_rule:
attrs.update({
"srcs": attr.label_list(
allow_empty = False,
allow_files = [".java"],
),
})
return attrs
def _google_java_format_impl(ctx):
return [_google_java_format_impl_factory(ctx, False)]
google_java_format = rule(
attrs = _get_attrs_for_google_java_format(False),
implementation = _google_java_format_impl,
executable = True,
)
def _google_java_format_test_impl(ctx):
return [_google_java_format_impl_factory(ctx, True)]
google_java_format_test = rule(
attrs = _get_attrs_for_google_java_format(True),
implementation = _google_java_format_test_impl,
test = True,
)
```
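The two rules above could be wired into a `BUILD` file roughly as follows; this is a sketch, and the `load` label assumes the file lives at `//tools/lint/google_java_format:def.bzl` as the `_template` path above suggests.
```python
load("//tools/lint/google_java_format:def.bzl", "google_java_format", "google_java_format_test")

# `bazel run //:format` rewrites files in place (--replace);
# `bazel test //:format_test` fails if any file would be changed (--set-exit-if-changed).
google_java_format(
    name = "format",
    exclude_patterns = ["./bazel-*/*"],
)

google_java_format_test(
    name = "format_test",
    srcs = glob(["src/main/java/**/*.java"]),
    exclude_patterns = ["./bazel-*/*"],
)
```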
|
{
"source": "JeongUkJae/othello-ml-agent-implementation",
"score": 3
}
|
#### File: JeongUkJae/othello-ml-agent-implementation/train.py
```python
import sys
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
from keras.utils import to_categorical
from PIL import Image
from othello_ml import Othello, Action
from othello_ml.visualizer import Visualizer
class MLRenderer:
def __init__(self, path='./result-prob'):
self.count = 0
self.path = path
def render(self, board):
board = board.reshape((8, 8)) * 10000
print(board)
image = Image.fromarray(board.astype('uint8'), 'L')
image = image.resize((400, 400))
image.save(f"{self.path}task{self.count}.png")
self.count += 1
class MLAgent:
def __init__(self,
othello,
model,
eps=0.999,
random_rate=1.,
no_reward=False,
renderer=None):
self.random_rate = random_rate
self.othello = othello
self.directions = self.othello.directions
self.model = model
self.eps = eps
self.turn = None
self.renderer = renderer
self.sorted_prediction = None
self.predict_before = False
self.predict_index = 0
othello.agent_actor(self.act)
if not no_reward:
othello.agent_reward(self.reward)
def _is_opposites_nearby(self, board, point, opposite):
        if board[point[0]][point[1]] != 0:
return False
for dir in self.directions:
x = point[0] + dir[0]
y = point[1] + dir[1]
if x < 0 or x > 7 or y < 0 or y > 7:
continue
            if board[x][y] == opposite:
return True
return False
def get_available_places(self, board, disk):
places = []
        opposite = -1 if disk == 1 else 1
for x in range(8):
for y in range(8):
if self._is_opposites_nearby(board, (x, y), opposite):
action = Action()
action.x, action.y = x, y
                    if self.othello._is_valid_action(0 if disk == -1 else 1,
action):
places.append((x, y))
return places
def act(self, board, turn, invalid_before):
action = Action()
        my_disk = -1 if turn == 0 else 1
if invalid_before:
if self.predict_before:
self.predict_index += 1
action.x, action.y = self.sorted_prediction[self.predict_index]
return action, False
places = self.get_available_places(board, my_disk)
self.predict_before = False
if places:
action.x, action.y = places[random.randrange(len(places))]
return action, False
else:
return None, True
self.random_rate *= self.eps
if self.random_rate > np.random.uniform():
places = self.get_available_places(board, my_disk)
self.predict_before = False
if places:
action.x, action.y = places[random.randrange(len(places))]
return action, False
else:
return None, True
self.turn = turn
self.predict_index = 0
        board = [[1 if i == my_disk else 0 if i == 0 else -1 for i in sl]
for sl in board]
result = self.model.predict(
np.reshape(np.array([board]), (1, 8, 8, 1)))[:, :, :, 1]
if self.renderer is not None:
self.renderer.render(result)
self.sorted_prediction = np.dstack(
np.unravel_index(np.argsort(result.ravel()), (8, 8)))[0]
action.x, action.y = self.sorted_prediction[0]
self.predict_before = True
return action, False
def reward(self, boards, reward):
        my_disk = -1 if self.turn == 0 else 1
converted = []
actions = []
for action, board in boards:
            converted.append([[(1 if i == my_disk else 0 if i == 0 else -1)
for i in sl] for sl in board])
ac = np.zeros((8, 8))
ac[(action.x, action.y)] = 1
actions.append(ac)
converted = np.array(converted)
actions = np.array(actions)
if reward > 0:
model.fit(
converted.reshape((-1, 8, 8, 1)),
to_categorical(actions.reshape((-1, 8, 8, 1))),
epochs=reward)
if __name__ == "__main__":
model = Sequential([
Conv2D(
32,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(32, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(2, (1, 1), activation='softmax', padding='same'),
])
model.summary()
model.compile(
'adam', loss='categorical_crossentropy', metrics=['accuracy'])
experience_replay = []
num_of_episode = 1000
result = model.predict(np.zeros((1, 8, 8, 1)))
result.shape
for i in range(num_of_episode):
print(f"episode {i}")
othello = Othello()
agent1 = MLAgent(othello, model)
agent2 = MLAgent(othello, model)
# visualizer = Visualizer(othello, path=f'./result/{i}')
othello.play()
model.save_weights('episode_1000.h5')
```
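For reference, the board encoding used inside `MLAgent.act` and `MLAgent.reward` above maps the current player's disks to 1, empty squares to 0 and the opponent's disks to -1 before reshaping to the network input; a small standalone sketch:
```python
import numpy as np

# Sketch of the board encoding used by MLAgent above: an initial Othello
# position seen from the player whose disks are represented by `my_disk`.
my_disk = 1
raw_board = [[0] * 8 for _ in range(8)]
raw_board[3][3] = raw_board[4][4] = 1
raw_board[3][4] = raw_board[4][3] = -1

encoded = [[1 if cell == my_disk else 0 if cell == 0 else -1 for cell in row] for row in raw_board]
model_input = np.reshape(np.array([encoded]), (1, 8, 8, 1))
print(model_input.shape)  # (1, 8, 8, 1) -- the shape fed to model.predict
```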
#### File: JeongUkJae/othello-ml-agent-implementation/vs_people.py
```python
import sys
import random
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, BatchNormalization
from keras.utils import to_categorical
from othello_ml import Othello, Action
from othello_ml.visualizer import Visualizer
from train import MLAgent, MLRenderer
class CliAgent:
def __init__(self, othello):
self.othello = othello
othello.agent_actor(self.act)
def act(self, board, turn, invalid_before):
if invalid_before:
print("์ ์์ ์ธ ์๋ฅผ ๋์๊ธฐ ๋ฐ๋๋๋ค.")
for row in board:
print(
"|", "|".join(
                    map(lambda x: 'y' if x == 1 else 'n' if x == -1 else 'O',
row)), "|")
        is_pass = 1 if input('Is it a pass? (y/n) ') == 'y' else 0
        try:
            x = int(input('x:'))
            y = int(input('y:'))
        except ValueError:
            x = y = 0
            is_pass = True
            print('Passing because the input was not valid.')
action = Action(x=x, y=y)
return action, is_pass
model = Sequential([
Conv2D(
32,
kernel_size=(3, 3),
input_shape=(8, 8, 1),
padding='same',
activation='relu'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(64, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(32, (3, 3), activation='relu', padding='same'),
BatchNormalization(),
Conv2D(2, (1, 1), activation='softmax', padding='same'),
])
model.summary()
model.load_weights("episode_1000.h5")
while True:
othello = Othello()
renderer = MLRenderer(path='./result/test-prob-')
agent1 = MLAgent(
othello, model, random_rate=0, no_reward=True, renderer=renderer)
agent2 = CliAgent(othello)
visualizer = Visualizer(othello, path=f'./result/test-')
othello.play()
```
|
{
"source": "jeongukjae/python-mecab",
"score": 3
}
|
#### File: python/tests/test_eval.py
```python
import os
from mecab.cli import run_mecab_system_eval
def test_eval(tmpdir):
"""check evaluate command"""
EVAL_DATA_PATH = "../../test-data/eval"
SYSTEM_DATA_PATH = os.path.join(EVAL_DATA_PATH, "system")
ANSWER_DATA_PATH = os.path.join(EVAL_DATA_PATH, "answer")
TRUE_DATA_PATH = os.path.join(EVAL_DATA_PATH, "test.gld")
assert os.path.exists(EVAL_DATA_PATH)
assert os.path.exists(SYSTEM_DATA_PATH)
assert os.path.exists(ANSWER_DATA_PATH)
assert os.path.exists(TRUE_DATA_PATH)
RESULT_PATH = tmpdir.join("test.out")
run_mecab_system_eval(["eval", "-l", "0 1 2 3 4", "-o", str(RESULT_PATH), SYSTEM_DATA_PATH, ANSWER_DATA_PATH])
with open(TRUE_DATA_PATH, "rb") as f:
assert f.read() == RESULT_PATH.read(mode="rb")
```
|
{
"source": "JeongUkJae/pytistory",
"score": 3
}
|
#### File: pytistory/tests/test_category.py
```python
import os
import warnings
import unittest
import requests_mock
from pytistory import PyTistory
class TestCategory(unittest.TestCase):
@requests_mock.mock()
def setUp(self, mock):
mock.post('https://www.tistory.com/auth/login', status_code=302)
mock.get('https://www.tistory.com/oauth/authorize', status_code=302, headers={
'Location': 'some_callback_url/#access_token=some-access-token&state=some-state'})
self.pytistory = PyTistory()
self.pytistory.configure(client_id='example client id',
tistory_id='example tistory id',
tistory_password='<PASSWORD>')
@requests_mock.mock()
    def test_카테고리_목록(self, mock):
mock.get('https://www.tistory.com/apis/category/list', json={
"tistory": {
"status": "200",
"item": {
"url": "oauth",
"secondaryUrl": "",
"categories": {
"category": [
{
"id": "403929",
"name": "OAuth2.0 Athentication",
"parent": "",
"label": "OAuth2.0 Athentication",
"entries": "0"
}
]
}
}
}
})
self.pytistory.category.list(blog_name='test-blog-5532')
@requests_mock.mock()
    def test_카테고리_목록_target_url_deprecated(self, mock):
mock.get('https://www.tistory.com/apis/category/list', json={
"tistory": {
"status": "200",
"item": {
"url": "oauth",
"secondaryUrl": "",
"categories": {
"category": [
{
"id": "403929",
"name": "OAuth2.0 Athentication",
"parent": "",
"label": "OAuth2.0 Athentication",
"entries": "0"
}
]
}
}
}
})
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.pytistory.category.list(target_url='http://oauth.tistory.com')
assert len(w) == 1
assert "A parameter `targetUrl` is deprecated." in str(
w[-1].message)
```
#### File: pytistory/tests/test_comment.py
```python
import unittest
import requests_mock
from pytistory import PyTistory
class TestComment(unittest.TestCase):
@requests_mock.mock()
def setUp(self, mock):
mock.post('https://www.tistory.com/auth/login', status_code=302)
mock.get('https://www.tistory.com/oauth/authorize', status_code=302, headers={
'Location': 'some_callback_url/#access_token=some-access-token&state=some-state'})
self.pytistory = PyTistory()
self.pytistory.configure(client_id='example client id',
tistory_id='example tistory id',
tistory_password='<PASSWORD>')
@requests_mock.mock()
    def test_댓글_리스트_받아오기(self, mock):
mock.get('https://www.tistory.com/apis/comment/list', json={
"tistory": {
"status": "200",
"item": {
"url": "http://oauth.tistory.com/4",
"secondaryUrl": "",
"postId": "4",
"totalCount": "3",
"comments": {
"comment": [
{
"id": "8176918",
"date": "1303796711",
"name": "์ง๋๋ค๊ฐ",
"parentId": "",
"homepage": "http://someurl.com",
"visibility": "2",
"comment": "์ข์ ๊ธ ๊ฐ์ฌํฉ๋๋ค.",
"open": "Y"
}
]
}
}
}
})
self.pytistory.comment.list(1, blog_name='test')
@requests_mock.mock()
    def test_최근_댓글_목록_가져오기(self, mock):
mock.get('https://www.tistory.com/apis/comment/newest', json={
"tistory": {
"status": "200",
"item": {
"url": "http://oauth.tistory.com",
"secondaryUrl": "",
"comments": {
"comment": [
{
"id": "8176926",
"date": "1303796900",
"postId": "4",
"name": "<NAME>",
"homepage": "http://oauth.tistory.com",
"comment": "๋น๋ฃจํ ๊ธ์ ์นญ์ฐฌ์ ํ์๋ ๋ชธ๋๋ฐ๋ฅผ ๋ชจ๋ฅด.. ์ง ์์!",
"open": "Y",
"link": "http://oauth.tistory.com/4#comment8176926"
}
]
}
}
}
})
self.pytistory.comment.newest(blog_name='test')
@requests_mock.mock()
    def test_댓글_작성하기(self, mock):
mock.post('https://www.tistory.com/apis/comment/write', json={
"tistory": {
"status": "200",
"commentUrl": "http://oauth.tistory.com/4#comment8176976",
"result": "OK"
}
})
        resp = self.pytistory.comment.write(1, 'comment-example', blog_name='test')
        self.assertEqual(resp['result'], 'OK', 'Failed to write the comment.')
        resp = self.pytistory.comment.write(
            2, 'comment-example', blog_name='test', parent_id=1)
        self.assertEqual(resp['result'], 'OK', 'Failed to write the comment.')
        resp = self.pytistory.comment.write(
            1, 'comment-example', blog_name='test', secret=1)
        self.assertEqual(resp['result'], 'OK', 'Failed to write the comment.')
@requests_mock.mock()
    def test_댓글_수정하기(self, mock):
mock.post('https://www.tistory.com/apis/comment/modify', json={
"tistory": {
"status": "200",
"commentUrl": "http://oauth.tistory.com/4#comment8176976",
"result": "OK"
}
})
        resp = self.pytistory.comment.modify(1, 1, 'modified comment', blog_name='test')
        self.assertEqual(resp['result'], 'OK', 'Failed to modify the comment.')
        resp = self.pytistory.comment.modify(
            1, 1, 'modified comment', blog_name='test', parent_id=1)
        self.assertEqual(resp['result'], 'OK', 'Failed to modify the comment.')
        resp = self.pytistory.comment.modify(
            1, 1, 'modified comment', blog_name='test', secret=1)
        self.assertEqual(resp['result'], 'OK', 'Failed to modify the comment.')
@requests_mock.mock()
    def test_댓글_삭제하기(self, mock):
mock.post('https://www.tistory.com/apis/comment/delete', json={
"tistory": {
"status": "200"
}
})
self.pytistory.comment.delete(1, 1, blog_name='test')
```
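Outside of the mocked tests, the same client calls would look roughly like this (a sketch only: the credentials and blog name are placeholders, and a real Tistory account is needed for the OAuth flow):
```python
from pytistory import PyTistory

pytistory = PyTistory()
pytistory.configure(client_id="<client id>",
                    tistory_id="<tistory id>",
                    tistory_password="<password>")

categories = pytistory.category.list(blog_name="my-blog")
comments = pytistory.comment.list(1, blog_name="my-blog")
pytistory.comment.write(1, "Thanks for the post!", blog_name="my-blog")
```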
|
{
"source": "jeongukjae/sejong-downloader",
"score": 3
}
|
#### File: sejong-downloader/sejong_downloader/cli.py
```python
import argparse
import asyncio
from .downloader import download_sejong_corpus
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--path", type=str, help="์ธ์ข
์ฝํผ์ค๊ฐ ์ ์ฅ๋ base path", default="./data", required=False)
def main():
args = parser.parse_args()
asyncio.run(download_sejong_corpus(args.path))
```
#### File: sejong-downloader/sejong_downloader/downloader.py
```python
import asyncio
import os
import re
from typing import Generator, Optional, Union
import aiofiles
import aiohttp
from .logger import logger
WORKER_COUNT = 200
SEJONG_ARTICLE_INDEXING_LINK = "https://ithub.korean.go.kr/user/total/database/corpusList.do"
SEJONG_ARTICLE_LINK = "https://ithub.korean.go.kr/user/total/database/corpusView.do"
SEJONG_DOWNLOAD_LINK = "https://ithub.korean.go.kr/common/boardFileDownload.do"
SEJONG_ZIP_DOWNLOAD_LINK = "https://ithub.korean.go.kr/common/boardFileZipDownload.do"
SEJONG_DEFAULT_REQUEST_PARAMS = {"boardSeq": 2, "boardType": "CORPUS", "userId": 0, "pageUnit": 10000}
SEJONG_DOWNLOAD_REQUEST_PARAMS = {
**SEJONG_DEFAULT_REQUEST_PARAMS,
"fileSeq": "1",
"userId": "0",
"boardSeq": "2",
"boardType": "CORPUS",
}
SEJONG_FILE_CATEGORIES = {"orgFileSeq": "1", "posFileSeq": "2", "semFileSeq": "3", "synFileSeq": "4"}
__all__ = ["download_sejong_corpus"]
class Article:
def __init__(self, article_num: str, article_sequence: str, title: str):
self.article_num = article_num.strip()
self.article_sequence = article_sequence.strip()
self.title = title.strip()
def __repr__(self):
return f"<Article num{self.article_num} {self.title} at {self.article_sequence}>"
async def download_sejong_corpus(base_path: str) -> None:
"""์ธ์ข
์ฝํผ์ค ์ ์ฒด๋ฅผ base_path๋ก ๋ค์ด๋ก๋ํ๋ค."""
os.makedirs(base_path, exist_ok=True)
os.makedirs(os.path.join(base_path, "cached"), exist_ok=True)
async with aiohttp.ClientSession() as session:
indexing_caching_path = os.path.join(base_path, "cached", "indexing.html")
indexing_page_content = await _get_cached(indexing_caching_path)
if indexing_page_content is None:
indexing_page_content = await _fetch_indexing_page(session, base_path)
await _cache_to(indexing_page_content, indexing_caching_path)
article_list = _extract_article_list_from(indexing_page_content)
logger.debug("trigger async coroutines for save article data")
queue = asyncio.Queue()
for article in article_list:
queue.put_nowait((base_path, article, session))
workers = []
for i in range(WORKER_COUNT):
worker = asyncio.create_task(_download_worker(f"worker-{i}", queue))
workers.append(worker)
await queue.join()
logger.info("complete to download all corpus data")
for worker in workers:
worker.cancel()
await asyncio.gather(*workers)
async def _download_worker(name, queue):
try:
while True:
base_path, article, session = queue.get_nowait()
await _save_attachements_in_article(base_path, article, session)
queue.task_done()
logger.info(f"complete to download {article} in {name}")
except asyncio.QueueEmpty:
logger.info(f"queue is empty now, exit {name}")
async def _get_cached(path: str) -> Optional[str]:
if os.path.exists(path) and os.path.getsize(path) != 0:
logger.debug(f"restore cache from {path}")
async with aiofiles.open(path, "r") as f:
return await f.read()
else:
return None
def _get_valid_path_name(path: str):
return path.replace("/", "")
async def _cache_to(content: Union[bytes, str], path: str) -> None:
async with aiofiles.open(path, "wb" if isinstance(content, bytes) else "w") as f:
logger.debug(f"cache to {path}")
await f.write(content)
async def _fetch_indexing_page(session: aiohttp.ClientSession, base_path: str) -> str:
logger.info("fetching indexing page")
async with session.get(SEJONG_ARTICLE_INDEXING_LINK, params=SEJONG_DEFAULT_REQUEST_PARAMS) as response:
if response.status != 200:
raise ValueError("Cannot fetch Sejong Corpus Page")
return await response.text()
def _extract_article_list_from(indexing_page_content: str) -> Generator[Article, None, None]:
pattern = re.compile(
r"<tr.*\n"
r"[ \t]*<td[^>]*>([\d]*).+\n"
r".*\n"
r".*\n"
r"[ \t]*<a href=\"javascript:goView\('([\d]*)'.*\n"
r"[ \t]*(.*)",
re.MULTILINE,
)
for item in pattern.finditer(indexing_page_content):
yield Article(*item.groups())
async def _save_attachements_in_article(base_path: str, article: Article, session: aiohttp.ClientSession) -> None:
caching_path = os.path.join(base_path, "cached", f"article_{article.article_num}.html")
article_content = await _get_cached(caching_path)
if article_content is None:
logger.info(f"fetch article {article}")
article_content = await _fetch_article(article, session)
await _cache_to(article_content, caching_path)
attachment_id = _get_attachment_id_from(article_content)
file_sequence_value = _get_file_sequence_values_from(article_content)
corpus_path = os.path.join(
base_path,
f"{int(article.article_num):04}_{_get_valid_path_name(article.title)}.{'zip' if ',' in file_sequence_value else 'text'}",
)
if os.path.exists(corpus_path) and os.path.getsize(corpus_path) != 0:
logger.debug(f"skip to download {article}")
return
async with session.post(
SEJONG_ZIP_DOWNLOAD_LINK if "," in file_sequence_value else SEJONG_DOWNLOAD_LINK,
data={
**SEJONG_DOWNLOAD_REQUEST_PARAMS,
"articleSeq": article.article_sequence,
"fNo": article.article_sequence,
"attachIdx": attachment_id,
"fileSeqValues": file_sequence_value,
},
) as response:
if response.status != 200:
raise ValueError(f"Cannot donwload {article} / {response.status} {await response.text()}")
logger.debug(f"start to download {article} ({file_sequence_value}) {response.headers['Content-Length']}")
attachment_content = await response.read()
logger.debug(f"save content {article}")
await _save_articles_attachment(corpus_path, attachment_content)
async def _fetch_article(article: Article, session: aiohttp.ClientSession) -> str:
async with session.get(
SEJONG_ARTICLE_LINK, params={**SEJONG_DEFAULT_REQUEST_PARAMS, "articleSeq": article.article_sequence}
) as response:
if response.status != 200:
raise ValueError(f"Cannot fetch article {article}")
return await response.text()
def _get_attachment_id_from(article_content: str) -> str:
pattern = re.compile(r"<input type=\"hidden\" id=\"attachIdx\" name=\"attachIdx\" value=\"([^\"]*)\"/>")
attachment_ids = pattern.findall(article_content)
if len(attachment_ids) != 1:
raise ValueError("Length of attachment ids should be 1")
return attachment_ids[0]
def _get_file_sequence_values_from(article_content: str) -> str:
pattern = re.compile(f'<input type="checkbox" name="(.*FileSeq)"')
seq_values = pattern.findall(article_content)
converted_seq_values = [SEJONG_FILE_CATEGORIES[seq] for seq in seq_values if seq in SEJONG_FILE_CATEGORIES]
return ",".join(converted_seq_values)
async def _save_articles_attachment(path: str, content: bytes) -> None:
async with aiofiles.open(path, "wb") as f:
await f.write(content)
```
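The CLI above is only a thin wrapper; the downloader can also be driven programmatically with `asyncio.run`, mirroring `cli.main`:
```python
import asyncio

from sejong_downloader.downloader import download_sejong_corpus

# Downloads every corpus article into ./data; cached index/article pages and
# already-downloaded attachments are skipped on re-runs.
asyncio.run(download_sejong_corpus("./data"))
```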
|
{
"source": "jeongukjae/tf-bert",
"score": 3
}
|
#### File: tf-bert/tf_bert/modeling.py
```python
import json
import tensorflow as tf
def gelu(x):
"""Gaussian Error Linear Unit.
Original paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + tf.tanh(0.7978845608028654 * x * (1 + 0.044715 * x * x)))
def get_activation_function(hidden_act):
if hidden_act == "linear":
return None
elif hidden_act == "relu":
return tf.nn.relu
elif hidden_act == "gelu":
return gelu
elif hidden_act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % hidden_act)
def get_initializer(x):
return tf.keras.initializers.TruncatedNormal(stddev=x)
class BertConfig:
def __init__(
self,
vocab_size: int,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: str = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 16,
initializer_range: float = 0.0,
layer_norm_eps: float = 1e-12,
**kwargs, # unused
):
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.hidden_act = get_activation_function(hidden_act)
self.hidden_dropout_prob = hidden_dropout_prob
self.hidden_size = hidden_size
self.initializer_range = initializer_range
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.num_attention_heads = num_attention_heads
self.num_hidden_layers = num_hidden_layers
self.type_vocab_size = type_vocab_size
self.vocab_size = vocab_size
self.layer_norm_eps = layer_norm_eps
@staticmethod
def from_json(path: str) -> "BertConfig":
with open(path, "r") as f:
file_content = json.load(f)
return BertConfig(**file_content)
class Bert(tf.keras.Model):
def __init__(self, config: BertConfig):
super().__init__()
# embedding layer
self.token_embeddings = tf.keras.layers.Embedding(
config.vocab_size,
config.hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
)
self.token_type_embeddings = tf.keras.layers.Embedding(
config.type_vocab_size,
config.hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
)
self.position_embeddings = tf.keras.layers.Embedding(
config.max_position_embeddings,
config.hidden_size,
embeddings_initializer=get_initializer(config.initializer_range),
)
self.embedding_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, axis=-1
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
# encoder
self.encoders = [
TransformerEncoder(config) for _ in range(config.num_hidden_layers)
]
# pooler
self.pooler_layer = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
)
def call(self, inputs):
input_ids, token_type_ids, position_ids, attention_mask = inputs
words_embeddings = self.token_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
position_embeddings = self.position_embeddings(position_ids)
        # broadcast the padding mask over heads and query positions: (batch, 1, 1, seq)
        attention_mask = (1.0 - attention_mask[:, tf.newaxis, tf.newaxis, :]) * -1e9
embeddings = words_embeddings + token_type_embeddings + position_embeddings
embeddings = self.embedding_layer_norm(embeddings)
hidden_states = self.dropout(embeddings)
for encoder in self.encoders:
hidden_states = encoder(hidden_states, attention_mask)
pooled_output = self.pooler_layer(hidden_states[:, 0, :])
return hidden_states, pooled_output
class TransformerEncoder(tf.keras.layers.Layer):
def __init__(self, config: BertConfig):
super().__init__()
self.attention_proj = tf.keras.layers.Dense(
config.hidden_size * 3,
kernel_initializer=get_initializer(config.initializer_range),
)
self.attention_dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
)
self.attention_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, axis=-1
)
self.intermediate_dense = tf.keras.layers.Dense(
config.intermediate_size,
kernel_initializer=get_initializer(config.initializer_range),
)
self.intermediate_act = config.hidden_act
self.intermediate_dense2 = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
)
self.intermediate_layer_norm = tf.keras.layers.LayerNormalization(
epsilon=config.layer_norm_eps, axis=-1
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.num_head = config.num_attention_heads
        self.attention_depth = int(config.hidden_size / self.num_head)
        self.scaling_factor = float(self.attention_depth) ** -0.5  # scale by sqrt(d_k), the per-head dimension
self.hidden_size = config.hidden_size
def call(self, sequence, attention_mask):
# multihead attention
attention, _ = self._multihead_attention(sequence, attention_mask)
# add and norm
attention = self.attention_layer_norm(attention + sequence)
# fc
intermediate = self.intermediate_dense(attention)
if self.intermediate_act is not None:
intermediate = self.intermediate_act(intermediate)
intermediate = self.dropout(self.intermediate_dense2(intermediate))
# add and norm
intermediate = self.intermediate_layer_norm(intermediate + attention)
return intermediate
def _multihead_attention(self, sequence, attention_mask):
q, k, v = tf.split(self.attention_proj(sequence), num_or_size_splits=3, axis=-1)
q = self._reshape_qkv(q)
k = self._reshape_qkv(k)
v = self._reshape_qkv(v)
# calculate attention
q *= self.scaling_factor
        attention = tf.matmul(q, k, transpose_b=True)
        attention += attention_mask  # mask padded positions before the softmax
        attention_weight = tf.nn.softmax(attention, axis=-1)
        attention = tf.matmul(attention_weight, v)
# concat
attention = tf.transpose(attention, perm=[0, 2, 1, 3])
new_shape = [-1, tf.shape(attention)[1], self.hidden_size]
attention = tf.reshape(attention, new_shape)
# last dense net
attention = self.attention_dense(attention)
attention = self.dropout(attention)
return attention, attention_weight
def _reshape_qkv(self, val):
new_shape = [-1, tf.shape(val)[1], self.num_head, self.attention_depth]
return tf.transpose(tf.reshape(val, new_shape), perm=[0, 2, 1, 3])
```
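A minimal smoke test for the model above with random inputs (the sizes are arbitrary; `initializer_range` is passed explicitly because the config default of 0.0 would zero-initialize every dense kernel):
```python
import tensorflow as tf

from tf_bert.modeling import Bert, BertConfig

config = BertConfig(vocab_size=32000, initializer_range=0.02)
model = Bert(config)

batch_size, seq_len = 2, 16
input_ids = tf.random.uniform([batch_size, seq_len], maxval=config.vocab_size, dtype=tf.int32)
token_type_ids = tf.zeros([batch_size, seq_len], dtype=tf.int32)
position_ids = tf.tile(tf.range(seq_len)[tf.newaxis, :], [batch_size, 1])
attention_mask = tf.ones([batch_size, seq_len], dtype=tf.float32)

hidden_states, pooled_output = model((input_ids, token_type_ids, position_ids, attention_mask))
print(hidden_states.shape, pooled_output.shape)  # (2, 16, 768) (2, 768)
```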
|
{
"source": "jeongukjae/tfds-korean",
"score": 2
}
|
#### File: tfds_korean/klue_mrc/klue_mrc.py
```python
import json
import tensorflow as tf
import tensorflow_datasets as tfds
_DESCRIPTION = """
KLUE benchmark - Machine Reading Comprehension (MRC) task.
For more details, see [KLUE Benchmark - MRC Task - Overview description](https://klue-benchmark.com/tasks/72/overview/description)
"""
_CITATION = """
@misc{park2021klue,
title={KLUE: Korean Language Understanding Evaluation},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and Joohong Lee and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
year={2021},
eprint={2105.09680},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_LICENSE = """
This work is licensed under a [Creative Commons Attribution-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-sa/4.0/).
See also [Copyright notice](https://klue-benchmark.com/tasks/72/overview/copyright).
"""
class KlueMrc(tfds.core.GeneratorBasedBuilder):
VERSION = tfds.core.Version("1.1.0")
RELEASE_NOTES = {
"1.0.0": "Initial release.",
"1.1.0": "KLUE 1.1.0",
}
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(
{
"guid": tfds.features.Text(),
"title": tfds.features.Text(),
"news_category": tfds.features.Text(),
"source": tfds.features.Text(),
"context": tfds.features.Text(),
"question": tfds.features.Text(),
"question_type": tfds.features.Tensor(shape=[], dtype=tf.int64),
"answers": tfds.features.Sequence(
{
"text": tfds.features.Text(),
"answer_start": tfds.features.Tensor(shape=[], dtype=tf.int64),
}
),
"is_impossible": tfds.features.Tensor(shape=[], dtype=tf.bool),
}
),
supervised_keys=None,
homepage="https://github.com/KLUE-benchmark/KLUE",
citation=_CITATION,
redistribution_info={"license": _LICENSE},
)
def _split_generators(self, dl_manager: tfds.download.DownloadManager):
files = dl_manager.download_and_extract(
{
"train": "https://raw.githubusercontent.com/KLUE-benchmark/KLUE/1cc52e64c0e0b6915577244f7439c55a42199a64/klue_benchmark/klue-mrc-v1.1/klue-mrc-v1.1_train.json",
"dev": "https://raw.githubusercontent.com/KLUE-benchmark/KLUE/1cc52e64c0e0b6915577244f7439c55a42199a64/klue_benchmark/klue-mrc-v1.1/klue-mrc-v1.1_dev.json",
}
)
return {
"train": self._generate_examples(files["train"]),
"dev": self._generate_examples(files["dev"]),
}
def _generate_examples(self, path):
with path.open() as f:
for document in json.load(f)["data"]:
title, source = document["title"], document["source"]
news_category = document["news_category"] if document["news_category"] is not None else ""
for paragraph in document["paragraphs"]:
context = paragraph["context"]
for qa in paragraph["qas"]:
guid, question, question_type, is_impossible = qa["guid"], qa["question"], qa["question_type"], qa["is_impossible"]
answers = qa["answers" if question_type in [1, 2] else "plausible_answers"]
yield guid, {
"guid": guid,
"title": title,
"news_category": news_category,
"source": source,
"context": context,
"question": question,
"question_type": question_type,
"answers": answers,
"is_impossible": is_impossible,
}
```
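Once the `tfds-korean` package is installed, importing the module registers the builder above and the split can be loaded like any other TFDS dataset; a sketch:
```python
import tensorflow_datasets as tfds
import tfds_korean.klue_mrc  # noqa: F401  # registers the KlueMrc builder defined above

ds = tfds.load("klue_mrc", split="dev")
for example in ds.take(1):
    print(example["question"].numpy().decode("utf-8"))
    print(example["answers"]["text"].numpy())
```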
|
{
"source": "jeongwhanchoi/graph-neural-pde",
"score": 2
}
|
#### File: graph-neural-pde/src/block_mixed.py
```python
import torch
from torch import nn
from function_transformer_attention import SpGraphTransAttentionLayer
from base_classes import ODEblock
from utils import get_rw_adj
class MixedODEblock(ODEblock):
def __init__(self, odefunc, regularization_fns, opt, data, device, t=torch.tensor([0, 1]), gamma=0.):
super(MixedODEblock, self).__init__(odefunc, regularization_fns, opt, data, device, t)
self.odefunc = odefunc(self.aug_dim * opt['hidden_dim'], self.aug_dim * opt['hidden_dim'], opt, data, device)
# self.odefunc.edge_index, self.odefunc.edge_weight = data.edge_index, edge_weight=data.edge_attr
edge_index, edge_weight = get_rw_adj(data.edge_index, edge_weight=data.edge_attr, norm_dim=1,
fill_value=opt['self_loop_weight'],
num_nodes=data.num_nodes,
dtype=data.x.dtype)
self.odefunc.edge_index = edge_index.to(device)
self.odefunc.edge_weight = edge_weight.to(device)
self.reg_odefunc.odefunc.edge_index, self.reg_odefunc.odefunc.edge_weight = self.odefunc.edge_index, self.odefunc.edge_weight
if opt['adjoint']:
from torchdiffeq import odeint_adjoint as odeint
else:
from torchdiffeq import odeint
self.train_integrator = odeint
self.test_integrator = odeint
self.set_tol()
# parameter trading off between attention and the Laplacian
self.gamma = nn.Parameter(gamma * torch.ones(1))
self.multihead_att_layer = SpGraphTransAttentionLayer(opt['hidden_dim'], opt['hidden_dim'], opt,
device).to(device)
def get_attention_weights(self, x):
attention, values = self.multihead_att_layer(x, self.odefunc.edge_index)
return attention
def get_mixed_attention(self, x):
gamma = torch.sigmoid(self.gamma)
attention = self.get_attention_weights(x)
mixed_attention = attention.mean(dim=1) * (1 - gamma) + self.odefunc.edge_weight * gamma
return mixed_attention
def forward(self, x):
t = self.t.type_as(x)
self.odefunc.attention_weights = self.get_mixed_attention(x)
integrator = self.train_integrator if self.training else self.test_integrator
if self.opt["adjoint"] and self.training:
z = integrator(
self.odefunc, x, t,
method=self.opt['method'],
options={'step_size': self.opt['step_size']},
adjoint_method=self.opt['adjoint_method'],
adjoint_options={'step_size': self.opt['adjoint_step_size']},
atol=self.atol,
rtol=self.rtol,
adjoint_atol=self.atol_adjoint,
adjoint_rtol=self.rtol_adjoint)[1]
else:
z = integrator(
self.odefunc, x, t,
method=self.opt['method'],
options={'step_size': self.opt['step_size']},
atol=self.atol,
rtol=self.rtol)[1]
return z
def __repr__(self):
return self.__class__.__name__ + '( Time Interval ' + str(self.t[0].item()) + ' -> ' + str(self.t[1].item()) \
+ ")"
```
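The core of `MixedODEblock` is `get_mixed_attention`, which blends the learned multi-head attention with the fixed random-walk edge weights through a sigmoid-squashed scalar `gamma`. A self-contained sketch of that mixing with toy shapes (not the repository's API):
```python
import torch

n_edges, n_heads = 6, 4
attention = torch.softmax(torch.randn(n_edges, n_heads), dim=0)  # per-head edge attention
edge_weight = torch.rand(n_edges)                                # random-walk normalised weights
gamma = torch.sigmoid(torch.zeros(1))                            # nn.Parameter in the real block

mixed = attention.mean(dim=1) * (1 - gamma) + edge_weight * gamma
print(mixed.shape)  # torch.Size([6])
```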
#### File: graph-neural-pde/src/function_transformer_attention.py
```python
import torch
from torch import nn
from torch_geometric.utils import softmax
import torch_sparse
from torch_geometric.utils.loop import add_remaining_self_loops
import numpy as np
from data import get_dataset
from utils import MaxNFEException
from base_classes import ODEFunc
class ODEFuncTransformerAtt(ODEFunc):
def __init__(self, in_features, out_features, opt, data, device):
super(ODEFuncTransformerAtt, self).__init__(opt, data, device)
if opt['self_loop_weight'] > 0:
self.edge_index, self.edge_weight = add_remaining_self_loops(data.edge_index, data.edge_attr,
fill_value=opt['self_loop_weight'])
else:
self.edge_index, self.edge_weight = data.edge_index, data.edge_attr
self.multihead_att_layer = SpGraphTransAttentionLayer(in_features, out_features, opt,
device, edge_weights=self.edge_weight).to(device)
def multiply_attention(self, x, attention, v=None):
# todo would be nice if this was more efficient
if self.opt['mix_features']:
vx = torch.mean(torch.stack(
[torch_sparse.spmm(self.edge_index, attention[:, idx], v.shape[0], v.shape[0], v[:, :, idx]) for idx in
range(self.opt['heads'])], dim=0),
dim=0)
ax = self.multihead_att_layer.Wout(vx)
else:
mean_attention = attention.mean(dim=1)
ax = torch_sparse.spmm(self.edge_index, mean_attention, x.shape[0], x.shape[0], x)
return ax
def forward(self, t, x): # t is needed when called by the integrator
if self.nfe > self.opt["max_nfe"]:
raise MaxNFEException
self.nfe += 1
attention, values = self.multihead_att_layer(x, self.edge_index)
ax = self.multiply_attention(x, attention, values)
if not self.opt['no_alpha_sigmoid']:
alpha = torch.sigmoid(self.alpha_train)
else:
alpha = self.alpha_train
f = alpha * (ax - x)
if self.opt['add_source']:
f = f + self.beta_train * self.x0
return f
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
class SpGraphTransAttentionLayer(nn.Module):
"""
Sparse version GAT layer, similar to https://arxiv.org/abs/1710.10903
"""
def __init__(self, in_features, out_features, opt, device, concat=True, edge_weights=None):
super(SpGraphTransAttentionLayer, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.alpha = opt['leaky_relu_slope']
self.concat = concat
self.device = device
self.opt = opt
self.h = int(opt['heads'])
self.edge_weights = edge_weights
try:
self.attention_dim = opt['attention_dim']
except KeyError:
self.attention_dim = out_features
assert self.attention_dim % self.h == 0, "Number of heads ({}) must be a factor of the dimension size ({})".format(
self.h, self.attention_dim)
self.d_k = self.attention_dim // self.h
self.Q = nn.Linear(in_features, self.attention_dim)
self.init_weights(self.Q)
self.V = nn.Linear(in_features, self.attention_dim)
self.init_weights(self.V)
self.K = nn.Linear(in_features, self.attention_dim)
self.init_weights(self.K)
self.activation = nn.Sigmoid() # nn.LeakyReLU(self.alpha)
self.Wout = nn.Linear(self.d_k, in_features)
self.init_weights(self.Wout)
def init_weights(self, m):
if type(m) == nn.Linear:
# nn.init.xavier_uniform_(m.weight, gain=1.414)
# m.bias.data.fill_(0.01)
nn.init.constant_(m.weight, 1e-5)
def forward(self, x, edge):
q = self.Q(x)
k = self.K(x)
v = self.V(x)
# perform linear operation and split into h heads
k = k.view(-1, self.h, self.d_k)
q = q.view(-1, self.h, self.d_k)
v = v.view(-1, self.h, self.d_k)
# transpose to get dimensions [n_nodes, attention_dim, n_heads]
k = k.transpose(1, 2)
q = q.transpose(1, 2)
v = v.transpose(1, 2)
src = q[edge[0, :], :, :]
dst_k = k[edge[1, :], :, :]
prods = torch.sum(src * dst_k, dim=1) / np.sqrt(self.d_k)
if self.opt['reweight_attention'] and self.edge_weights is not None:
prods = prods * self.edge_weights.unsqueeze(dim=1)
attention = softmax(prods, edge[self.opt['attention_norm_idx']])
return attention, v
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
if __name__ == '__main__':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'heads': 2, 'K': 10,
'attention_norm_idx': 0, 'add_source': False,
'alpha_dim': 'sc', 'beta_dim': 'sc', 'max_nfe': 1000, 'mix_features': False
}
dataset = get_dataset(opt, '../data', False)
t = 1
func = ODEFuncTransformerAtt(dataset.data.num_features, 6, opt, dataset.data, device)
out = func(t, dataset.data.x)
```
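`SpGraphTransAttentionLayer` scores every edge with a scaled dot product between the source node's query and the destination node's key, then normalises the scores per node. A single-head, plain-torch sketch of the same computation with toy sizes (hand-rolled softmax in place of `torch_geometric.utils.softmax`):
```python
import torch

n_nodes, d_k, n_edges = 5, 8, 7
q = torch.randn(n_nodes, d_k)
k = torch.randn(n_nodes, d_k)
edge = torch.randint(0, n_nodes, (2, n_edges))

# unnormalised score per edge: (source query . destination key) / sqrt(d_k)
scores = (q[edge[0]] * k[edge[1]]).sum(dim=-1) / d_k ** 0.5

# softmax grouped by source node, matching attention_norm_idx = 0
group = edge[0]
exp = scores.exp()
denom = torch.zeros(n_nodes).index_add_(0, group, exp)
attention = exp / denom[group]
print(attention.shape)  # torch.Size([7])
```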
#### File: graph-neural-pde/src/population_search.py
```python
from functools import partial
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from GNN import GNN
from run_GNN import train, test, get_optimizer, get_cora_opt
from data import get_dataset
import torch
from torch import nn
import time
import os
import argparse
import numpy as np
from ray_tune import train_ray
def set_pop_search_space(opt):
opt['decay'] = tune.loguniform(2e-3, 5e-2)
opt['hidden_dim'] = tune.sample_from(lambda _: 2 ** np.random.randint(4, 7))
opt['lr'] = tune.loguniform(0.001, 0.03)
opt['input_dropout'] = tune.uniform(0, 0.8)
opt['dropout'] = tune.uniform(0, 0.8)
if opt['ode'] == 'att':
opt['self_loop_weight'] = tune.choice([0,1])
else:
opt['self_loop_weight'] = tune.uniform(0, 5)
opt['time'] = tune.uniform(1., 10.)
opt['tol_scale'] = tune.loguniform(1e1, 1e5)
return opt
class CustomStopper(tune.Stopper):
def __init__(self, max_iter):
self.should_stop = False
self.max_iter = max_iter
def __call__(self, trial_id, result):
if not self.should_stop and result["accuracy"] > 0.96:
self.should_stop = True
return self.should_stop or result["training_iteration"] >= self.max_iter
def stop_all(self):
return self.should_stop
def main(opt):
data_dir = os.path.abspath("../data")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
opt = set_pop_search_space(opt)
scheduler = PopulationBasedTraining(
time_attr='training_iteration',
metric='accuracy',
mode='max',
resample_probability=0.75,
perturbation_interval=opt['pi'],
hyperparam_mutations={
'lr': tune.loguniform(0.001, 0.03),
'self_loop_weight': tune.uniform(0, 5),
'decay': tune.loguniform(2e-3, 5e-2),
'input_dropout': tune.uniform(0, 0.8),
'dropout': tune.uniform(0, 0.8),
# 'hidden_dim' : tune.uniform(16, 64),
'tol_scale': tune.loguniform(1e2, 1e4),
'time': tune.uniform(1., 15.),
# 'alpha_dim': ['sc', 'vc'],
'no_alpha_sigmoid': [True, False]
})
reporter = CLIReporter(
metric_columns=["accuracy", "loss", "training_iteration"])
result = tune.run(
partial(train_ray, data_dir=data_dir),
name=opt['name'],
stop=CustomStopper(opt['max_iter']),
resources_per_trial={"cpu": opt['cpus'], "gpu": opt['gpus']},
config=opt,
num_samples=opt['num_samples'],
scheduler=scheduler,
max_failures=3,
local_dir='../ray_tune',
progress_reporter=reporter,
raise_on_failed_trial=False)
best_trial = result.get_best_trial("accuracy", "max", "all")
print("Best trial config: {}".format(best_trial.config))
print("Best trial final validation loss: {}".format(
best_trial.last_result["loss"]))
print("Best trial final validation accuracy: {}".format(
best_trial.last_result["accuracy"]))
dataset = get_dataset(opt, data_dir, False)
best_trained_model = GNN(best_trial.config, dataset, device)
if opt['gpus'] > 1:
best_trained_model = nn.DataParallel(best_trained_model)
best_trained_model.to(device)
checkpoint_path = os.path.join(best_trial.checkpoint.value, "checkpoint")
model_state, optimizer_state = torch.load(checkpoint_path)
best_trained_model.load_state_dict(model_state)
test_acc = test(best_trained_model, best_trained_model.data.to(device))
print("Best trial test set accuracy: {}".format(test_acc))
df = result.dataframe(metric="accuracy", mode="max").sort_values('accuracy',
ascending=False) # get max accuracy for each trial
timestr = time.strftime("%Y%m%d-%H%M%S")
df.to_csv('../hyperopt_results/result_{}.csv'.format(timestr))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='Cora',
help='Cora, Citeseer, Pubmed, Computers, Photo, CoauthorCS')
parser.add_argument('--hidden_dim', type=int, default=32, help='Hidden dimension.')
parser.add_argument('--input_dropout', type=float, default=0.5, help='Input dropout rate.')
parser.add_argument('--dropout', type=float, default=0.1, help='Dropout rate.')
parser.add_argument('--optimizer', type=str, default='adam', help='Optimizer.')
parser.add_argument('--lr', type=float, default=0.01, help='Learning rate.')
parser.add_argument('--decay', type=float, default=0.005, help='Weight decay for optimization')
parser.add_argument('--self_loop_weight', type=float, default=1.0, help='Weight of self-loops.')
parser.add_argument('--epoch', type=int, default=50, help='Number of training epochs per iteration.')
parser.add_argument('--alpha', type=float, default=1.0, help='Factor in front matrix A.')
parser.add_argument('--time', type=float, default=7.0, help='End time of ODE function.')
parser.add_argument('--augment', action='store_true',
                        help='double the length of the feature vector by appending zeros to stabilise ODE learning')
parser.add_argument('--alpha_dim', type=str, default='vc', help='choose either scalar (sc) or vector (vc) alpha')
    parser.add_argument('--no_alpha_sigmoid', dest='no_alpha_sigmoid', action='store_true', help='do not apply a sigmoid to alpha before multiplying')
parser.add_argument('--beta_dim', type=str, default='vc', help='choose either scalar (sc) or vector (vc) beta')
# ODE args
parser.add_argument('--method', type=str, default='dopri5',
help="set the numerical solver: dopri5, euler, rk4, midpoint")
parser.add_argument('--ode', type=str, default='att', help="set ode block. Either 'ode', 'att', 'sde'")
parser.add_argument('--adjoint', default=False, help='use the adjoint ODE method to reduce memory footprint')
parser.add_argument('--tol_scale', type=float, default=30., help='multiplier for atol and rtol')
# SDE args
parser.add_argument('--dt_min', type=float, default=1e-5, help='minimum timestep for the SDE solver')
parser.add_argument('--dt', type=float, default=1e-3, help='fixed step size')
parser.add_argument('--adaptive', type=bool, default=False, help='use adaptive step sizes')
# Attention args
parser.add_argument('--attention_dropout', type=float, default=0., help='dropout of attention weights')
parser.add_argument('--leaky_relu_slope', type=float, default=0.2, help='slope of the negative part of the leaky relu used in attention')
parser.add_argument('--heads', type=int, default=5, help='number of attention heads')
# ray args
parser.add_argument('--num_samples', type=int, default=20, help='number of ray trials')
parser.add_argument('--gpus', type=float, default=0, help='number of gpus per trial. Can be fractional')
parser.add_argument('--cpus', type=float, default=1, help='number of cpus per trial. Can be fractional')
parser.add_argument('--name', type=str, default='ray_exp')
parser.add_argument('--max_iter', type=int, default=100,
help='maximum number of iterations of a population search trial')
parser.add_argument('--pi', type=int, default=5, help='perturbation interval: the mutation frequency')
args = parser.parse_args()
opt = vars(args)
main(opt)
```
#### File: graph-neural-pde/test/test_attention.py
```python
import unittest
import torch
from torch import tensor
from torch import nn
from function_GAT_attention import SpGraphAttentionLayer, ODEFuncAtt
from torch_geometric.utils import softmax, to_dense_adj
from data import get_dataset
class AttentionTests(unittest.TestCase):
def setUp(self):
self.edge = tensor([[0, 2, 2, 1], [1, 0, 1, 2]])
self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=torch.float)
self.W = tensor([[2, 1], [3, 2]], dtype=torch.float)
self.alpha = tensor([[1, 2, 3, 4]], dtype=torch.float)
self.edge1 = tensor([[0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1]])
self.x1 = torch.ones((3, 2), dtype=torch.float)
self.leakyrelu = nn.LeakyReLU(0.2)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.opt = {'dataset': 'Cora', 'self_loop_weight': 1, 'leaky_relu_slope': 0.2, 'beta_dim': 'vc', 'heads': 2,
'K': 10,
'attention_norm_idx': 0, 'add_source': False, 'max_nfe': 1000, 'mix_features': False,
'attention_dim': 32,
'mixed_block': False, 'rewiring': None, 'no_alpha_sigmoid': False, 'reweight_attention': False,
'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None}
def tearDown(self) -> None:
pass
def test(self):
h = torch.mm(self.x, self.W)
edge_h = torch.cat((h[self.edge[0, :], :], h[self.edge[1, :], :]), dim=1)
self.assertTrue(edge_h.shape == torch.Size([self.edge.shape[1], 2 * 2]))
ah = self.alpha.mm(edge_h.t()).t()
self.assertTrue(ah.shape == torch.Size([self.edge.shape[1], 1]))
edge_e = self.leakyrelu(ah)
attention = softmax(edge_e, self.edge[1])
print(attention)
def test_function(self):
in_features = self.x.shape[1]
out_features = self.x.shape[1]
def get_round_sum(tens, n_digits=3):
val = torch.sum(tens, dim=int(not self.opt['attention_norm_idx']))
return (val * 10 ** n_digits).round() / (10 ** n_digits)
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x, self.edge) # should be n_edges x n_heads
self.assertTrue(attention.shape == (self.edge.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(self.edge, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(self.edge, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
dataset = get_dataset(self.opt, '../data', False)
data = dataset.data
in_features = data.x.shape[1]
out_features = data.x.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(data.x, data.edge_index) # should be n_edges x n_heads
self.assertTrue(attention.shape == (data.edge_index.shape[1], self.opt['heads']))
dense_attention1 = to_dense_adj(data.edge_index, edge_attr=attention[:, 0]).squeeze()
dense_attention2 = to_dense_adj(data.edge_index, edge_attr=attention[:, 1]).squeeze()
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention1), 1.)))
self.assertTrue(torch.all(torch.eq(get_round_sum(dense_attention2), 1.)))
self.assertTrue(torch.all(attention > 0.))
self.assertTrue(torch.all(attention <= 1.))
def test_symetric_attention(self):
in_features = self.x1.shape[1]
out_features = self.x1.shape[1]
att_layer = SpGraphAttentionLayer(in_features, out_features, self.opt, self.device, concat=True)
attention, _ = att_layer(self.x1, self.edge1) # should be n_edges x n_heads
self.assertTrue(torch.all(torch.eq(attention, 0.5 * torch.ones((self.edge1.shape[1], self.x1.shape[1])))))
def test_module(self):
dataset = get_dataset(self.opt, '../data', False)
t = 1
out_dim = 6
func = ODEFuncAtt(dataset.data.num_features, out_dim, self.opt, dataset.data, self.device)
out = func(t, dataset.data.x)
print(out.shape)
self.assertTrue(out.shape == (dataset.data.num_nodes, dataset.num_features))
```
#### File: graph-neural-pde/test/test_ICML_gnn.py
```python
import unittest
import torch
from torch import tensor
from torch import nn
from GNN_ICML20 import gcn_norm_fill_val, coo2tensor, train_ray
from data import get_dataset
from torch_geometric.nn.conv.gcn_conv import gcn_norm
from torch_geometric.utils.convert import to_scipy_sparse_matrix
from ray.tune.utils import diagnose_serialization
from functools import partial
import os
from test_params import OPT
class ICMLGNNTests(unittest.TestCase):
def setUp(self):
self.edge = tensor([[0, 2, 2], [1, 0, 1]])
self.x = tensor([[1., 2.], [3., 2.], [4., 5.]], dtype=float)
self.W = tensor([[2, 1], [3, 2]], dtype=float)
self.alpha = tensor([[1, 2, 3, 4]], dtype=float)
self.leakyrelu = nn.LeakyReLU(0.2)
def tearDown(self) -> None:
pass
def test_fill_norm(self):
opt = {'dataset': 'Cora', 'improved': False, 'self_loop_weight': 1., 'rewiring': None, 'no_alpha_sigmoid': False,
'reweight_attention': False, 'kinetic_energy': None, 'jacobian_norm2': None, 'total_deriv': None, 'directional_penalty': None, 'beltrami': False}
opt = {**OPT, **opt}
dataset = get_dataset(opt, '../data', False)
data = dataset.data
edge_index1, edge_weight1 = gcn_norm(data.edge_index, data.edge_attr, data.num_nodes,
opt['improved'], opt['self_loop_weight'] > 0, dtype=data.x.dtype)
edge_index, edge_weight = gcn_norm_fill_val(data.edge_index, data.edge_attr, opt['self_loop_weight'],
data.num_nodes, dtype=data.x.dtype)
assert torch.all(edge_index.eq(edge_index1))
assert torch.all(edge_weight.eq(edge_weight1))
def main():
data_dir = os.path.abspath("../data")
trainable = partial(train_ray, data_dir=data_dir)
diagnose_serialization(trainable)
opt = {'dataset': 'Cora', 'improved': False, 'self_loop_weight': 1.}
dataset = get_dataset(opt, '../data', False)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data = dataset.data
edge_index1, edge_weight1 = gcn_norm(data.edge_index, data.edge_attr, data.num_nodes,
opt['improved'], opt['self_loop_weight'] > 0, dtype=data.x.dtype)
edge_index, edge_weight = gcn_norm_fill_val(data.edge_index, data.edge_attr, opt['self_loop_weight'], data.num_nodes,
opt['self_loop_weight'] > 0)
assert torch.all(edge_index.eq(edge_index1))
assert torch.all(edge_weight.eq(edge_weight1))
coo = to_scipy_sparse_matrix(edge_index, edge_weight)
coo = coo2tensor(coo, device)
if __name__ == '__main__':
main()
```
|
{
"source": "jeongwhanchoi/LT-OCF",
"score": 2
}
|
#### File: LT-OCF/code/world.py
```python
import os
from os.path import join
import torch
from torch.nn import parallel
from parse import parse_args
import multiprocessing
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
args = parse_args()
ROOT_PATH = os.path.dirname(os.path.abspath(__file__ + "/../"))
CODE_PATH = join(ROOT_PATH, 'code')
DATA_PATH = join(ROOT_PATH, 'data')
BOARD_PATH = join(CODE_PATH, 'runs')
FILE_PATH = join(CODE_PATH, 'checkpoints')
PRETRAINED_FILE_PATH = join(CODE_PATH, 'pretrain')
import sys
sys.path.append(join(CODE_PATH, 'sources'))
if not os.path.exists(FILE_PATH):
os.makedirs(FILE_PATH, exist_ok=True)
config = {}
all_dataset = ['lastfm', 'gowalla', 'yelp2018', 'amazon-book']
all_models = ['mf', 'lgn', 'ltocf', 'ltocf2', 'ltocf1']
# config['batch_size'] = 4096
config['bpr_batch_size'] = args.bpr_batch
config['latent_dim_rec'] = args.recdim
config['lightGCN_n_layers']= args.layer
config['dropout'] = args.dropout
config['keep_prob'] = args.keepprob
config['A_n_fold'] = args.a_fold
config['test_u_batch_size'] = args.testbatch
config['multicore'] = args.multicore
config['lr'] = args.lr
config['lr_time'] = args.lr_time
config['decay'] = args.decay
config['pretrain'] = args.pretrain
config['A_split'] = False
config['bigdata'] = False
config['time_split'] = args.timesplit
config['solver'] = args.solver
config['learnable_time'] = args.learnable_time
config['dual_res'] = args.dual_res
config['pretrained_file_name'] = args.pretrained_file
config['K'] = args.K
CORES = multiprocessing.cpu_count() // 2
seed = args.seed
GPU_NUM = args.gpuid
device = torch.device(f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu')
torch.cuda.set_device(device) # change allocation of current GPU
parallel = args.parallel
dataset = args.dataset
model_name = args.model
if dataset not in all_dataset:
raise NotImplementedError(f"Haven't supported {dataset} yet!, try {all_dataset}")
if model_name not in all_models:
raise NotImplementedError(f"Haven't supported {model_name} yet!, try {all_models}")
adjoint = args.adjoint
rtol = args.rtol
atol = args.atol
TRAIN_epochs = args.epochs
LOAD = args.load
PATH = args.path
topks = eval(args.topks)
tensorboard = args.tensorboard
comment = args.comment
# let pandas shut up
from warnings import simplefilter
simplefilter(action="ignore", category=FutureWarning)
def cprint(words : str):
print(f"\033[0;30;43m{words}\033[0m")
logo = r"""
โโโโโโโโโโโโโโโโ โโโโโโโ โโโโโโโโโโโโโโโ
โโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโ
โโโ โโโโโโโโโโโโ โโโโโโ โโโโโโ
โโโ โโโโโโโโโโโโ โโโโโโ โโโโโโ
โโโโโโโโ โโโ โโโโโโโโโโโโโโโโโโโโ
โโโโโโโ โโโ โโโโโโโ โโโโโโโโโโ
"""
# font: ANSI Shadow
# http://patorjk.com/software/taag/#p=display&f=ANSI%20Shadow&t=CT-OCF
print(logo)
print('Current cuda device ', torch.cuda.current_device())  # check
```
|
{
"source": "jeongwhanchoi/STG-NCDE",
"score": 3
}
|
#### File: STG-NCDE/model/GCDE.py
```python
import torch
import torch.nn.functional as F
import torch.nn as nn
import controldiffeq
from vector_fields import *
class NeuralGCDE(nn.Module):
def __init__(self, args, func_f, func_g, input_channels, hidden_channels, output_channels, initial, device, atol, rtol, solver):
super(NeuralGCDE, self).__init__()
self.num_node = args.num_nodes
self.input_dim = input_channels
self.hidden_dim = hidden_channels
self.output_dim = output_channels
self.horizon = args.horizon
self.num_layers = args.num_layers
self.default_graph = args.default_graph
self.node_embeddings = nn.Parameter(torch.randn(self.num_node, args.embed_dim), requires_grad=True)
self.func_f = func_f
self.func_g = func_g
self.solver = solver
self.atol = atol
self.rtol = rtol
#predictor
self.end_conv = nn.Conv2d(1, args.horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True)
self.init_type = 'fc'
if self.init_type == 'fc':
self.initial_h = torch.nn.Linear(self.input_dim, self.hidden_dim)
self.initial_z = torch.nn.Linear(self.input_dim, self.hidden_dim)
elif self.init_type == 'conv':
self.start_conv_h = nn.Conv2d(in_channels=input_channels,
out_channels=hidden_channels,
kernel_size=(1,1))
self.start_conv_z = nn.Conv2d(in_channels=input_channels,
out_channels=hidden_channels,
kernel_size=(1,1))
def forward(self, times, coeffs):
#source: B, T_1, N, D
#target: B, T_2, N, D
#supports = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec1.transpose(0,1))), dim=1)
spline = controldiffeq.NaturalCubicSpline(times, coeffs)
if self.init_type == 'fc':
h0 = self.initial_h(spline.evaluate(times[0]))
z0 = self.initial_z(spline.evaluate(times[0]))
elif self.init_type == 'conv':
h0 = self.start_conv_h(spline.evaluate(times[0]).transpose(1,2).unsqueeze(-1)).transpose(1,2).squeeze()
z0 = self.start_conv_z(spline.evaluate(times[0]).transpose(1,2).unsqueeze(-1)).transpose(1,2).squeeze()
z_t = controldiffeq.cdeint_gde_dev(dX_dt=spline.derivative, #dh_dt
h0=h0,
z0=z0,
func_f=self.func_f,
func_g=self.func_g,
t=times,
method=self.solver,
atol=self.atol,
rtol=self.rtol)
# init_state = self.encoder.init_hidden(source.shape[0])
# output, _ = self.encoder(source, init_state, self.node_embeddings) #B, T, N, hidden
# output = output[:, -1:, :, :] #B, 1, N, hidden
z_T = z_t[-1:,...].transpose(0,1)
#CNN based predictor
output = self.end_conv(z_T) #B, T*C, N, 1
output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node)
output = output.permute(0, 1, 3, 2) #B, T, N, C
return output
```
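The predictor at the end of `NeuralGCDE.forward` is a single `Conv2d` whose kernel spans the hidden dimension, followed by a reshape to `(B, horizon, N, output_dim)`. A standalone sketch of just that head (the shapes are assumptions, e.g. 307 nodes as in PEMS04):
```python
import torch
import torch.nn as nn

B, N, hidden, horizon, output_dim = 4, 307, 64, 12, 1
end_conv = nn.Conv2d(1, horizon * output_dim, kernel_size=(1, hidden), bias=True)

z_T = torch.randn(B, 1, N, hidden)                     # final CDE state
out = end_conv(z_T)                                    # (B, horizon*output_dim, N, 1)
out = out.squeeze(-1).reshape(-1, horizon, output_dim, N)
out = out.permute(0, 1, 3, 2)                          # (B, horizon, N, output_dim)
print(out.shape)  # torch.Size([4, 12, 307, 1])
```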
#### File: STG-NCDE/model/vector_fields.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class FinalTanh_f(nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers):
super(FinalTanh_f, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = nn.Linear(hidden_channels, hidden_hidden_channels)
self.linears = nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
for _ in range(num_hidden_layers - 1))
self.linear_out = nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
for linear in self.linears:
z = linear(z)
z = z.relu()
# z: torch.Size([64, 207, 32])
# self.linear_out(z): torch.Size([64, 207, 64])
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.input_channels)
z = z.tanh()
return z
class FinalTanh_f_prime(nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers):
super(FinalTanh_f_prime, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = nn.Linear(hidden_channels, hidden_hidden_channels)
self.linears = nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
for _ in range(num_hidden_layers - 1))
# self.linear_out = nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.linear_out = nn.Linear(hidden_hidden_channels, hidden_channels * hidden_channels) #32,32*4 -> # 32,32,4
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
for linear in self.linears:
z = linear(z)
z = z.relu()
# z: torch.Size([64, 207, 32])
# self.linear_out(z): torch.Size([64, 207, 64])
# z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.input_channels)
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.hidden_channels)
z = z.tanh()
return z
class FinalTanh_f2(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers):
super(FinalTanh_f2, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
# self.linear_in = torch.nn.Linear(hidden_channels, hidden_hidden_channels)
# self.linears = torch.nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
# for _ in range(num_hidden_layers - 1))
# self.linear_out = torch.nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.start_conv = torch.nn.Conv2d(in_channels=hidden_channels,
out_channels=hidden_channels,
kernel_size=(1,1))
# self.linear = torch.nn.Conv2d(in_channels=hidden_channels,
# out_channels=hidden_channels,
# kernel_size=(1,1))
self.linears = torch.nn.ModuleList(torch.nn.Conv2d(in_channels=hidden_channels,
out_channels=hidden_channels,
kernel_size=(1,1))
for _ in range(num_hidden_layers - 1))
self.linear_out = torch.nn.Conv2d(in_channels=hidden_channels,
out_channels=input_channels*hidden_channels,
kernel_size=(1,1))
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
# z: torch.Size([64, 207, 32])
z = self.start_conv(z.transpose(1,2).unsqueeze(-1))
z = z.relu()
for linear in self.linears:
z = linear(z)
z = z.relu()
z = self.linear_out(z).squeeze().transpose(1,2).view(*z.transpose(1,2).shape[:-2], self.hidden_channels, self.input_channels)
z = z.tanh()
return z
class VectorField_g(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers, num_nodes, cheb_k, embed_dim,
g_type):
super(VectorField_g, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels, hidden_hidden_channels)
# self.linears = torch.nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
# for _ in range(num_hidden_layers - 1))
#FIXME:
# self.linear_out = torch.nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.linear_out = torch.nn.Linear(hidden_hidden_channels, hidden_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.g_type = g_type
if self.g_type == 'agc':
self.node_embeddings = nn.Parameter(torch.randn(num_nodes, embed_dim), requires_grad=True)
self.cheb_k = cheb_k
self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, hidden_hidden_channels, hidden_hidden_channels))
self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, hidden_hidden_channels))
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
if self.g_type == 'agc':
z = self.agc(z)
else:
raise ValueError('Check g_type argument')
# for linear in self.linears:
# z = linear(x_gconv)
# z = z.relu()
#FIXME:
# z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.input_channels)
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.hidden_channels)
z = z.tanh()
return z #torch.Size([64, 307, 64, 1])
def agc(self, z):
"""
Adaptive Graph Convolution
- Node Adaptive Parameter Learning
- Data Adaptive Graph Generation
"""
node_num = self.node_embeddings.shape[0]
supports = F.softmax(F.relu(torch.mm(self.node_embeddings, self.node_embeddings.transpose(0, 1))), dim=1)
# laplacian=False
laplacian=False
if laplacian == True:
# support_set = [torch.eye(node_num).to(supports.device), -supports]
support_set = [supports, -torch.eye(node_num).to(supports.device)]
# support_set = [torch.eye(node_num).to(supports.device), -supports]
# support_set = [-supports]
else:
support_set = [torch.eye(node_num).to(supports.device), supports]
#default cheb_k = 3
for k in range(2, self.cheb_k):
support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
supports = torch.stack(support_set, dim=0)
weights = torch.einsum('nd,dkio->nkio', self.node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out
bias = torch.matmul(self.node_embeddings, self.bias_pool) #N, dim_out
x_g = torch.einsum("knm,bmc->bknc", supports, z) #B, cheb_k, N, dim_in
x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in
z = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out
return z
class VectorField_only_g(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers, num_nodes, cheb_k, embed_dim,
g_type):
super(VectorField_only_g, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels, hidden_hidden_channels)
# self.linears = torch.nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
# for _ in range(num_hidden_layers - 1))
#FIXME:
self.linear_out = torch.nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
# self.linear_out = torch.nn.Linear(hidden_hidden_channels, hidden_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.g_type = g_type
if self.g_type == 'agc':
self.node_embeddings = nn.Parameter(torch.randn(num_nodes, embed_dim), requires_grad=True)
self.cheb_k = cheb_k
self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, hidden_hidden_channels, hidden_hidden_channels))
self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, hidden_hidden_channels))
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
if self.g_type == 'agc':
z = self.agc(z)
else:
raise ValueError('Check g_type argument')
# for linear in self.linears:
# z = linear(x_gconv)
# z = z.relu()
#FIXME:
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.input_channels)
# z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.hidden_channels)
z = z.tanh()
return z #torch.Size([64, 307, 64, 1])
def agc(self, z):
"""
Adaptive Graph Convolution
- Node Adaptive Parameter Learning
- Data Adaptive Graph Generation
"""
node_num = self.node_embeddings.shape[0]
supports = F.softmax(F.relu(torch.mm(self.node_embeddings, self.node_embeddings.transpose(0, 1))), dim=1)
laplacian=False
if laplacian == True:
# support_set = [torch.eye(node_num).to(supports.device), -supports]
support_set = [supports, -torch.eye(node_num).to(supports.device)]
# support_set = [torch.eye(node_num).to(supports.device), -supports]
# support_set = [-supports]
else:
support_set = [torch.eye(node_num).to(supports.device), supports]
#default cheb_k = 3
for k in range(2, self.cheb_k):
support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
supports = torch.stack(support_set, dim=0)
weights = torch.einsum('nd,dkio->nkio', self.node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out
bias = torch.matmul(self.node_embeddings, self.bias_pool) #N, dim_out
x_g = torch.einsum("knm,bmc->bknc", supports, z) #B, cheb_k, N, dim_in
x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in
z = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out
return z
class VectorField_g_prime(torch.nn.Module):
def __init__(self, input_channels, hidden_channels, hidden_hidden_channels, num_hidden_layers, num_nodes, cheb_k, embed_dim,
g_type):
super(VectorField_g_prime, self).__init__()
self.input_channels = input_channels
self.hidden_channels = hidden_channels
self.hidden_hidden_channels = hidden_hidden_channels
self.num_hidden_layers = num_hidden_layers
self.linear_in = torch.nn.Linear(hidden_channels, hidden_hidden_channels)
# self.linears = torch.nn.ModuleList(torch.nn.Linear(hidden_hidden_channels, hidden_hidden_channels)
# for _ in range(num_hidden_layers - 1))
self.linear_out = torch.nn.Linear(hidden_hidden_channels, input_channels * hidden_channels) #32,32*4 -> # 32,32,4
self.g_type = g_type
if self.g_type == 'agc':
self.node_embeddings = nn.Parameter(torch.randn(num_nodes, embed_dim), requires_grad=True)
self.cheb_k = cheb_k
self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, hidden_hidden_channels, hidden_hidden_channels))
self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, hidden_hidden_channels))
def extra_repr(self):
return "input_channels: {}, hidden_channels: {}, hidden_hidden_channels: {}, num_hidden_layers: {}" \
"".format(self.input_channels, self.hidden_channels, self.hidden_hidden_channels, self.num_hidden_layers)
def forward(self, z):
z = self.linear_in(z)
z = z.relu()
if self.g_type == 'agc':
z = self.agc(z)
else:
raise ValueError('Check g_type argument')
# for linear in self.linears:
# z = linear(x_gconv)
# z = z.relu()
z = self.linear_out(z).view(*z.shape[:-1], self.hidden_channels, self.input_channels)
z = z.tanh()
return z #torch.Size([64, 307, 64, 1])
def agc(self, z):
"""
Adaptive Graph Convolution
- Node Adaptive Parameter Learning
- Data Adaptive Graph Generation
"""
node_num = self.node_embeddings.shape[0]
supports = F.softmax(F.relu(torch.mm(self.node_embeddings, self.node_embeddings.transpose(0, 1))), dim=1)
support_set = [torch.eye(node_num).to(supports.device), supports]
#default cheb_k = 3
for k in range(2, self.cheb_k):
support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
supports = torch.stack(support_set, dim=0)
weights = torch.einsum('nd,dkio->nkio', self.node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out
bias = torch.matmul(self.node_embeddings, self.bias_pool) #N, dim_out
x_g = torch.einsum("knm,bmc->bknc", supports, z) #B, cheb_k, N, dim_in
x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in
z = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out
return z
```
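All three vector fields share the `agc` step: an adaptive adjacency is built from node embeddings, expanded into Chebyshev supports, and applied with node-specific weights drawn from a weight pool. A self-contained sketch with toy sizes:
```python
import torch
import torch.nn.functional as F

B, N, C_in, C_out, embed_dim, cheb_k = 2, 10, 16, 16, 4, 3
node_embeddings = torch.randn(N, embed_dim)
weights_pool = torch.randn(embed_dim, cheb_k, C_in, C_out)
bias_pool = torch.randn(embed_dim, C_out)
z = torch.randn(B, N, C_in)

supports = F.softmax(F.relu(node_embeddings @ node_embeddings.T), dim=1)
support_set = [torch.eye(N), supports]
for _ in range(2, cheb_k):  # Chebyshev recursion T_k = 2*A*T_{k-1} - T_{k-2}
    support_set.append(2 * supports @ support_set[-1] - support_set[-2])
supports = torch.stack(support_set, dim=0)                        # (cheb_k, N, N)

weights = torch.einsum('nd,dkio->nkio', node_embeddings, weights_pool)  # per-node weights
bias = node_embeddings @ bias_pool                                # (N, C_out)
x_g = torch.einsum('knm,bmc->bknc', supports, z).permute(0, 2, 1, 3)
out = torch.einsum('bnki,nkio->bno', x_g, weights) + bias         # (B, N, C_out)
print(out.shape)  # torch.Size([2, 10, 16])
```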
|
{
"source": "jeongwhanchoi/Z-GCNETs",
"score": 3
}
|
#### File: Z-GCNETs/model/logger.py
```python
import os
import logging
from datetime import datetime
def get_logger(root, name=None, debug=True):
    # when debug is True, show DEBUG and INFO on screen
    # when debug is False, write DEBUG to the log file and INFO to both screen & file
    # INFO is always shown on screen
# create a logger
logger = logging.getLogger(name)
#critical > error > warning > info > debug > notset
logger.setLevel(logging.DEBUG)
    # define the format
formatter = logging.Formatter('%(asctime)s: %(message)s', "%Y-%m-%d %H:%M")
# create another handler for output log to console
console_handler = logging.StreamHandler()
if debug:
console_handler.setLevel(logging.DEBUG)
else:
console_handler.setLevel(logging.INFO)
    # create a handler to write the log to a file
    logfile = os.path.join(root, 'run.log')
    print('Create log file in:', logfile)
file_handler = logging.FileHandler(logfile, mode='w')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# add Handler to logger
logger.addHandler(console_handler)
if not debug:
logger.addHandler(file_handler)
return logger
if __name__ == '__main__':
time = datetime.now().strftime('%Y%m%d%H%M%S')
print(time)
    logger = get_logger('.', debug=True)  # root must be an existing directory; run.log is written inside it
logger.debug('this is a {} debug message'.format(1))
logger.info('this is an info message')
logger.debug('this is a debug message')
logger.info('this is an info message')
logger.debug('this is a debug message')
logger.info('this is an info message')
```
#### File: Z-GCNETs/model/utils.py
```python
import numpy as np
import pandas as pd
import scipy.sparse as sp
import os
path = os.getcwd()
def get_adjacency_matrxix(dataset, number_nodes):
PEMS_net_dataset = pd.read_csv(path + '/data/PEMS0' + str(dataset)[5] + '/distance.csv', header=0)
PEMS_net_edges = PEMS_net_dataset.values[:, 0:2]
A = np.zeros((number_nodes, number_nodes), dtype= np.float32)
for i in range(PEMS_net_edges.shape[0]):
A[int(PEMS_net_edges[i,0] -1 ), int(PEMS_net_edges[i,1] -1 )] = 1.
A[int(PEMS_net_edges[i, 1] - 1), int(PEMS_net_edges[i, 0] -1 )] = 1.
A = sp.csr_matrix(A)
return A
# Fractional power
def fractional_fltr(adj, number_nodes, sigma, gamma):
degrees = np.array(adj.sum(1)).flatten()
degrees[np.isinf(degrees)] = 0.
D = sp.diags(degrees, 0)
L_darray = (D - adj).toarray()
D, V = np.linalg.eigh(L_darray, 'U')
M_gamma_Lambda = D
M_gamma_Lambda[M_gamma_Lambda < 1e-5] = 0
M_V = V
M_gamma_Lambda = np.float_power(M_gamma_Lambda, gamma)
M_gamma_Lambda = np.diag(M_gamma_Lambda, 0)
M_gamma_Lambda = sp.csr_matrix(M_gamma_Lambda)
M_V = sp.csr_matrix(M_V)
Lg = M_V * M_gamma_Lambda
Lg = Lg * sp.csr_matrix.transpose(M_V)
Lg = Lg.toarray()
Lg = Lg.reshape(1, -1)
Lg[abs(Lg) < 1e-5] = 0.
Lg = Lg.reshape(number_nodes, -1)
Dg = np.diag(np.diag(Lg))
Ag = Dg - Lg
Ag = sp.csr_matrix(Ag)
power_Dg_l = np.float_power(np.diag(Dg), -sigma)
power_Dg_l = sp.csr_matrix(np.diag(power_Dg_l))
power_Dg_r = np.float_power(np.diag(Dg), (sigma - 1))
power_Dg_r = sp.csr_matrix(np.diag(power_Dg_r))
fractional_fltr = power_Dg_l * Ag
fractional_fltr = fractional_fltr * power_Dg_r
return fractional_fltr
trans_adj = get_adjacency_matrxix(dataset= 'PEMS04', number_nodes= 307)
#frac_fltr = fractional_fltr(adj= trans_adj, number_nodes= 307, sigma= 0.5, gamma= 2.)
sp.save_npz('PEMSD4_adj.npz', trans_adj)
#test = sp.load_npz('PEMSD4_fltr.npz')
```
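A toy illustration of the fractional-power idea inside `fractional_fltr`, not tied to the PEMS files above: eigendecompose the graph Laplacian, raise its spectrum to a power `gamma`, and rebuild a generalised Laplacian.
```python
import numpy as np
import scipy.sparse as sp

A = sp.csr_matrix(np.array([[0, 1, 0],
                            [1, 0, 1],
                            [0, 1, 0]], dtype=np.float32))
D = sp.diags(np.array(A.sum(1)).flatten(), 0)
L = (D - A).toarray()

eigvals, eigvecs = np.linalg.eigh(L)
gamma = 2.0
eigvals = np.float_power(np.clip(eigvals, 0.0, None), gamma)  # clip round-off negatives
L_gamma = eigvecs @ np.diag(eigvals) @ eigvecs.T
print(np.round(L_gamma, 3))
```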
#### File: Z-GCNETs/model/ZGCNETs.py
```python
import torch
import torch.nn as nn
from ZGCNCELL import NLSGCRNCNNCell
class NNLSDCRNNCNN(nn.Module):
def __init__(self, node_num, dim_in, dim_out, link_len, embed_dim, num_layers=1, window_len = 12):
super(NNLSDCRNNCNN, self).__init__()
assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
self.node_num = node_num
self.input_dim = dim_in
self.num_layers = num_layers
self.window_len = window_len
self.nlsdcrnncnn_cells = nn.ModuleList()
for _ in range(0, num_layers-1):
self.nlsdcrnncnn_cells.append(NLSGCRNCNNCell(node_num, dim_in, dim_out, window_len, link_len, embed_dim))
for _ in range(num_layers-1, num_layers):
self.nlsdcrnncnn_cells.append(NLSGCRNCNNCell(node_num, dim_out, dim_out, window_len, link_len, embed_dim))
def forward(self, x, init_state, node_embeddings, zigzag_PI):
assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim
seq_length = x.shape[1]
current_inputs = x
output_hidden = []
for i in range(self.num_layers):
state = init_state[i]
inner_states = []
for t in range(seq_length):
state = self.nlsdcrnncnn_cells[i](current_inputs[:, t, :, :], state, current_inputs,
node_embeddings, zigzag_PI[:, :, :].view(-1, 1, 100, 100)) #zigzag PI input shape
inner_states.append(state)
output_hidden.append(state)
current_inputs = torch.stack(inner_states, dim=1)
return current_inputs, output_hidden
def init_hidden(self, batch_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.nlsdcrnncnn_cells[i].init_hidden_state(batch_size))
return torch.stack(init_states, dim=0) #(num_layers, B, N, hidden_dim)
#------------------------------------------------------------------------------------------------------------------------#
class NNLSGCRNCNN(nn.Module):
def __init__(self, args):
super(NNLSGCRNCNN, self).__init__()
self.num_node = args.num_nodes
self.input_dim = args.input_dim
self.hidden_dim = args.rnn_units
self.output_dim = args.output_dim
self.horizon = args.horizon
self.num_layers = args.num_layers
self.default_graph = args.default_graph
self.node_embeddings = nn.Parameter(torch.randn(self.num_node, args.embed_dim), requires_grad=True)
self.encoder = NNLSDCRNNCNN(args.num_nodes, args.input_dim, args.rnn_units, args.link_len,
args.embed_dim, args.num_layers, args.window_len)
#predictor
self.end_conv = nn.Conv2d(1, args.horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True)
def forward(self, source, targets, zigzag_PI, teacher_forcing_ratio=0.5):
#source: B, T_1, N, D
#target: B, T_2, N, D
init_state = self.encoder.init_hidden(source.shape[0])
output, _ = self.encoder(source, init_state, self.node_embeddings, zigzag_PI) #B, T, N, hidden
output = output[:, -1:, :, :] #B, 1, N, hidden
#CNN based predictor
output = self.end_conv((output)) #B, T*C, N, 1
output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node)
output = output.permute(0, 1, 3, 2) #B, T, N, C
return output
```
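Each cell receives the zigzag persistence images reshaped to a single-channel NCHW tensor via `zigzag_PI[:, :, :].view(-1, 1, 100, 100)`. A small sketch of that reshape (the batch size and the 100x100 resolution are assumptions taken from the call above):
```python
import torch

batch = 8
zigzag_PI = torch.rand(batch, 100, 100)        # one persistence image per sample
pi_nchw = zigzag_PI.view(-1, 1, 100, 100)      # (B, 1, 100, 100) for the Conv2d inside the cell
print(pi_nchw.shape)
```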
|
{
"source": "jeongwonkwak/Korean-to-English-NMT",
"score": 3
}
|
#### File: Korean-to-English-NMT/simple_nmt/decoder.py
```python
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
class Decoder(nn.Module):
def __init__(self, word_vec_dim, hidden_size, n_layers=4, dropout_p=.2):
super(Decoder, self).__init__()
# Be aware of value of 'batch_first' parameter and 'bidirectional' parameter.
self.rnn = nn.LSTM(word_vec_dim + hidden_size,
hidden_size,
num_layers=n_layers,
dropout=dropout_p,
bidirectional=False,
batch_first=True
)
def forward(self, emb_t, h_t_1_tilde, h_t_1):
# |emb_t| = (batch_size, 1, word_vec_dim)
# |h_t_1_tilde| = (batch_size, 1, hidden_size)
# |h_t_1[0]| = (n_layers, batch_size, hidden_size)
batch_size = emb_t.size(0)
hidden_size = h_t_1[0].size(-1)
if h_t_1_tilde is None:
# If this is the first time-step,
h_t_1_tilde = emb_t.new(batch_size, 1, hidden_size).zero_()
# Input feeding trick.
x = torch.cat([emb_t, h_t_1_tilde], dim=-1)
        # Unlike the encoder, the decoder must consume its inputs one time-step at a time.
y, h = self.rnn(x, h_t_1)
return y, h
class Generator(nn.Module):
def __init__(self, hidden_size, output_size):
super(Generator, self).__init__()
self.output = nn.Linear(hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
# |x| = (batch_size, length, hidden_size)
y = self.softmax(self.output(x))
# |y| = (batch_size, length, output_size)
# Return log-probability instead of just probability.
print("***********generator****************")
return y
```
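A one-step usage sketch for the classes above, assuming the file is importable as `simple_nmt.decoder`. It shows the input-feeding trick: the previous attended state `h_t_1_tilde` (zero at the first time-step) is concatenated with the current word embedding before entering the LSTM.
```python
import torch
from simple_nmt.decoder import Decoder, Generator

batch_size, word_vec_dim, hidden_size, n_layers, vocab_size = 2, 16, 32, 4, 100
decoder = Decoder(word_vec_dim, hidden_size, n_layers=n_layers)
generator = Generator(hidden_size, vocab_size)

emb_t = torch.randn(batch_size, 1, word_vec_dim)          # current target word embedding
h_t_1 = (torch.zeros(n_layers, batch_size, hidden_size),  # previous LSTM hidden state
         torch.zeros(n_layers, batch_size, hidden_size))
y, h = decoder(emb_t, None, h_t_1)                        # None -> zero h_t_1_tilde at t=0
log_probs = generator(y)
print(y.shape, log_probs.shape)  # (2, 1, 32) and (2, 1, 100)
```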
|
{
"source": "Jeongwoo-KGI/maplestory_dpm_calc",
"score": 2
}
|
#### File: dpmModule/jobs/cadena.py
```python
from ..kernel.graph import DynamicVariableOperation
from ..kernel import core
from ..character import characterKernel as ck
from functools import partial
from ..status.ability import Ability_tool
from . import globalSkill
from .jobbranch import thieves
from .jobclass import nova
from math import ceil
from typing import Any, Dict
###### Passive Skill ######
class WeaponVarietyStackWrapper(core.StackSkillWrapper):
def __init__(self, _max, prof_agent, final_attack, use_prof_agent_attack):
super(WeaponVarietyStackWrapper, self).__init__(core.BuffSkill("์จํฐ ๋ฒ๋ผ์ด์ดํฐ ์คํ", 0, 99999999), _max)
self.currentWeapon = None
self.use_final_attack = core.OptionalElement(final_attack.is_available, final_attack, name = "์จํฐ ๋ฒ๋ผ์ด์ดํฐ ์ฟจํ์")
self.use_prof_agent_attack = use_prof_agent_attack
self.prof_agent = prof_agent
self.modifierInvariantFlag = False
def vary(self, weapon):
self.currentWeapon = weapon
return core.ResultObject(0, core.CharacterModifier(), 0, 0, sname = self.skill.name, spec = 'graph control')
def get_modifier(self):
multiplier = 11
if self.prof_agent.is_active():
multiplier *= 2
return core.CharacterModifier(pdamage_indep = 8 * multiplier)
def _changed(self, weapon):
return self.currentWeapon != weapon
def stackController(self, weapon):
task = core.Task(self, partial(self.vary, weapon))
taskHolder = core.TaskHolder(task, name = "์จ๋ฒ ์คํ")
taskHolder.onAfter(self.use_final_attack)
taskHolder.onAfter(self.use_prof_agent_attack)
conditionalTask = core.OptionalElement(partial(self._changed, weapon), taskHolder, name = "๋ฌด๊ธฐ ๊ต์ฒด")
return conditionalTask
class MaelstromWrapper(core.SummonSkillWrapper):
def __init__(self, vEhc, tick):
self.tick_list = [840, 120, 180, 270, 390, 540, 720, 930, 1170, 1440, 1740, 99999999]
self.currentTick = len(self.tick_list)
self.reuseTick = tick
skill = core.SummonSkill("์ฒด์ธ์์ธ :๋ฉ์ผ์คํธ๋กฌ", 540, 0, 300+12*vEhc.getV(3,2), 4, 8540).isV(vEhc,3,2)
super(MaelstromWrapper, self).__init__(skill)
def check_use(self):
return self.currentTick >= self.reuseTick
def _use(self, skill_modifier):
result = super(MaelstromWrapper, self)._use(skill_modifier)
self.tick = self.tick_list[0]
self.currentTick = 1
return result
def _useTick(self):
result = super(MaelstromWrapper, self)._useTick()
self.currentTick += 1
return result
def get_delay(self) -> float:
return self.tick_list[self.currentTick]
def comboBuilder(name, skill_list):
combo = core.DamageSkill(name, 0, 0, 0).wrap(core.DamageSkillWrapper)
for sk in skill_list:
combo.onAfter(sk)
delaySum = 0
cnst_list = []
for sk in skill_list:
if DynamicVariableOperation.reveal_argument(sk.skill.cooltime) > 0:
cnst_list += [core.ConstraintElement(sk._id + "(์ฟจํ์)", sk, partial(sk.is_cooltime_left, delaySum, -1))]
delaySum += DynamicVariableOperation.reveal_argument(sk.skill.delay)
for cnst in cnst_list:
combo.onConstraint(cnst)
return combo
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.vEnhanceNum = 11
self.jobtype = "luk"
self.jobname = "์นด๋ฐ๋"
self.ability_list = Ability_tool.get_ability_set('boss_pdamage', 'reuse', 'mess') # ์์๋ก ๋ณด๊ณต ์ฒซ์ค ์ฌ์ฉ, ์ฌ์ฌ์ฉ ๊ตฌํ์ ๋ณ๊ฒฝ
self.preEmptiveSkills = 1
def get_passive_skill_list(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
passive_level = chtr.get_base_modifier().passive_level + self.combat
        CollectingForLeap = core.InformedCharacterModifier("콜렉팅 포 리프", stat_main = 50)
PhisicalTraining = core.InformedCharacterModifier("ํผ์ง์ปฌ ํธ๋ ์ด๋", stat_main = 30, stat_sub = 30)
QuickserviceMind = core.InformedCharacterModifier("ํต์๋น์ค ๋ง์ธ๋", att = 10, crit_damage = 5, crit = 10)
        BasicDetection = core.InformedCharacterModifier("베이직 디텍션", armor_ignore = 20)
WeaponMastery = core.InformedCharacterModifier("์จํฐ ์์คํผํธ", att = 30 + passive_level, crit = 30 + passive_level, crit_damage = 15 + ceil(passive_level / 2))
QuickserviceMind_II = core.InformedCharacterModifier("ํต์๋น์ค ๋ง์ธ๋ II", att = 30, crit_damage = 5, crit = 10)
ReadyToDiePassive = thieves.ReadyToDiePassiveWrapper(vEhc, 2, 3)
return [CollectingForLeap, PhisicalTraining,
QuickserviceMind, BasicDetection, WeaponMastery, QuickserviceMind_II, ReadyToDiePassive]
def get_modifier_optimization_hint(self):
return core.CharacterModifier(armor_ignore = 30, crit_damage = 44, pdamage = 20, crit = 6)
def get_not_implied_skill_list(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("๋ฌด๊ธฐ์์",pdamage_indep = 30)
Mastery = core.InformedCharacterModifier("์๋ จ๋",pdamage_indep = -5 + 0.5 * ceil(passive_level / 2))
return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
'''
๋
ผ์ฒด์ธ์์ธ -๋ฆฌ์ธํฌ์ค, ๋ณด์คํฌ๋ฌ
์ฒด์ธ์์ธ ์คํธ๋กํฌ-๋ฅ์คํธ์ดํ ๋ฆฌ์ธํฌ์ค, ๋ฆฌ์ธํฌ์ค
์ฒด์ธ์์ธ :ํ
์ดํฌ๋ค์ด-์ฟจํ์ ๋ฆฌ๋์ค
์ฝ๊ฐ์์:
์คํธ๋กํฌ,๋ฒ๋ผ์ด์ดํฐ,ํ์ฌ - ๋๋ค๋ฐฐํธ - ๋ธ๋ฆญ - ์ท๊ฑด/๋ด - ํฌ๋ฌ์/์์ด์ ํธ - ์๋ฏธํฐ/ํด๋ก - ๋์ดํ/์๋๊ฑฐ - ํ
์ดํฌ๋ค์ด
์คํฌ๊ฐํ์์:
ํจ๋ฆฌ-์ค๋-๋ฒ์คํธ-๋ฉ-๋ ํฌ๋ค
1ํ์บ์ฌ 150ms
์บ์ฌ 180ms
์๋จผ ์ค๋ก์ ์๋๊ฑฐ 3ํ ํ ํญ๋ฐ
์๋ฏธํฐ, ์ท๊ฑด, ๋์ดํ, ๋ธ๋ฆญ, ๋๋ค๋ฐฐํธ 1ํ, ์๋๊ฑฐ 1ํฑ์ ์ต์ข
๋ 15%๋ฅผ ๋ฐ๊ณ ์์.
๋ด-๋ธ๋ฆญ / ์ท๊ฑด-ํด๋ก / ๋์ดํ / ์๋๊ฑฐ / ๋ฐฐํธ / ์๋ฏธํฐ-์ฒด์ด์ค / ๋ฉ์ผ์คํธ๋กฌ 4์ด๋น 1ํ
'''
STROKE1_HIT_RATE = 1
STROKE1_CANCEL_TIME = 150
CANCEL_TIME = 180
WINGDAGGER_HIT = 3
passive_level = chtr.get_base_modifier().passive_level + self.combat
CheapShotII = core.CharacterModifier(crit = 2, crit_damage = 10 + ceil(passive_level / 4)) # ์ํฌํฌ์ธํธ ์ปจ๋ฒ์ง ์ดํ
CheapShotIIBleed = core.DotSkill("์ํฌํฌ์ธํธ ์ปจ๋ฒ์ง ์ดํ(์ถํ)", 0, 1000, 110 + 2 * passive_level, 1, 99999999).wrap(core.DotSkillWrapper)
CheapShotIIBleedBuff = core.BuffSkill("์ํฌํฌ์ธํธ ์ปจ๋ฒ์ง ์ดํ(์ถํ)(๋๋ฒํ)", 0, 99999999, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage).wrap(core.BuffSkillWrapper)
CheapShotIIAdventureMageBuff = core.BuffSkill("์ํฌํฌ์ธํธ ์ปจ๋ฒ์ง ์ดํ(๋ชจ๋ฒ๋งํฌ)", 0, 99999999, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage).wrap(core.BuffSkillWrapper)
#๋ฒํ
Booster = core.BuffSkill("๋ถ์คํฐ", 0, 200000).wrap(core.BuffSkillWrapper)
SpecialPotion = core.BuffSkill("์์ธ๋จ ํน์ ๋น์ฝ", 570, 60*1000, pdamage = 10, crit = 10, cooltime = 120*1000).wrap(core.BuffSkillWrapper) # ์นด๋ฐ๋๋ง ๋๋ ์ด์์
        ProfessionalAgent = core.BuffSkill("프로페셔널 에이전트", 570, 30000, cooltime = 200000).wrap(core.BuffSkillWrapper)
        ProfessionalAgentAdditionalDamage = core.DamageSkill("프로페셔널 에이전트(공격)", 0, 255, 2).setV(vEhc, 4, 2, False).wrap(core.DamageSkillWrapper)
        ProfessionalAgent_Attack = core.OptionalElement(ProfessionalAgent.is_active, ProfessionalAgentAdditionalDamage, name= "프로페셔널 에이전트 추가타")
#์จํฐ๋ฒ๋ผ์ด์ดํฐ ์ถ๊ฐํ
WeaponVarietyAttack = core.DamageSkill("์จํฐ ๋ฒ๋ผ์ด์ดํฐ", 0, 350 + 15 * passive_level, 4, cooltime = 250).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
WeaponVariety = WeaponVarietyStackWrapper(11, ProfessionalAgent, WeaponVarietyAttack, ProfessionalAgent_Attack)
#์ฒด์ธ์์ธ
ChainArts_Stroke_1 = core.DamageSkill("์ฒด์ธ์์ธ :์คํธ๋กํฌ(1ํ)", 210, 150, 2 * STROKE1_HIT_RATE, modifier = core.CharacterModifier(pdamage = 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
ChainArts_Stroke_1_Cancel = core.DamageSkill("์ฒด์ธ์์ธ :์คํธ๋กํฌ(1ํ)(์บ์ฌ)", STROKE1_CANCEL_TIME, 150, 2 * STROKE1_HIT_RATE, modifier = core.CharacterModifier(pdamage = 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
ChainArts_Stroke_2 = core.DamageSkill("์ฒด์ธ์์ธ :์คํธ๋กํฌ(2ํ)", 390, 400, 5, modifier = core.CharacterModifier(pdamage = 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
ChainArts_Stroke_2_Cancel = core.DamageSkill("์ฒด์ธ์์ธ :์คํธ๋กํฌ(2ํ)(์บ์ฌ)", CANCEL_TIME, 400, 5, modifier = core.CharacterModifier(pdamage = 20)).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
ChainArts_Chais = core.DamageSkill("์ฒด์ธ์์ธ :์ฒด์ด์ค", 150, 100, 1).wrap(core.DamageSkillWrapper)
ChainArts_Crush = core.DamageSkill("์ฒด์ธ์์ธ :ํฌ๋ฌ์", 750, 510, 15, cooltime = 30000).setV(vEhc, 4, 2, True).wrap(core.DamageSkillWrapper) # ๋ฏธ์ฌ์ฉ
#ChainArts_ToughHustleInit = core.DamageSkill("์ฒด์ธ์์ธ :ํฐํํ์ฌ", 0, 0, 0, cooltime = 50000).setV(vEhc, 0, 2, False) #์ง์ํ
#ChainArts_ToughHustle = core.DamageSkill("์ฒด์ธ์์ธ :ํฐํํ์ฌ", 5000000, 600 + 7 * self.combat, 2).setV(vEhc, 0, 2, False) #์ง์ํ, 6์ด, ๋ฏธ์ฌ์ฉ
        ChainArts_Takedown_Init = core.DamageSkill("체인아츠 :테이크다운", 4080, 300+3*self.combat, 2, cooltime = (150-30)*1000, red=True).setV(vEhc, 7, 2, False).wrap(core.DamageSkillWrapper)
        ChainArts_Takedown_Attack = core.DamageSkill("체인아츠 :테이크다운(연속 공격)", 2970, 990+15*self.combat, 15, modifier = core.CharacterModifier(armor_ignore = 80)).setV(vEhc, 7, 2, False).wrap(core.DamageSkillWrapper)
        ChainArts_Takedown_Wave = core.DamageSkill("체인아츠 :테이크다운(파동)", 0, 600+5*self.combat, 4).setV(vEhc, 7, 2, False).wrap(core.DamageSkillWrapper) # repeated 8 times
        ChainArts_Takedown_Final = core.DamageSkill("체인아츠 :테이크다운(최종)", 0, 500, 10, modifier = core.CharacterModifier(armor_ignore = 80)).setV(vEhc, 7, 2, False).wrap(core.DamageSkillWrapper)
        ChainArts_Takedown_Bind = core.BuffSkill("체인아츠 :테이크다운(바인드)", 0, 10000, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage, cooltime = -1).wrap(core.BuffSkillWrapper)
        # Non-Chain Arts skills
        SummonCuttingSimiter = core.DamageSkill("써먼 커팅 시미터", CANCEL_TIME, 425 + 5 * passive_level, 5, cooltime = 4000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20, pdamage_indep = 15)).setV(vEhc, 5, 2, False).wrap(core.DamageSkillWrapper)
SummonScratchingClaw = core.DamageSkill("์๋จผ ์คํฌ๋์นญ ํด๋ก", CANCEL_TIME, 455 + 5 * passive_level, 4, cooltime = 3000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20)).setV(vEhc, 5, 2, False).wrap(core.DamageSkillWrapper)
SummonThrowingWingdagger = core.DamageSkill("์๋จผ ์ค๋ก์ ์๋๊ฑฐ", 780, 0, 0, cooltime = 10000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20)).wrap(core.DamageSkillWrapper)
SummonThrowingWingdaggerSummon = core.SummonSkill("์๋จผ ์ค๋ก์ ์๋๊ฑฐ(์ํ)", 0, 330, 425 + 5 * passive_level, 1, 330*WINGDAGGER_HIT, cooltime= -1, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20, pdamage_indep = 15 / WINGDAGGER_HIT)).setV(vEhc, 6, 2, False).wrap(core.SummonSkillWrapper)
SummonThrowingWingdaggerEnd = core.DamageSkill("์๋จผ ์ค๋ก์ ์๋๊ฑฐ(ํญ๋ฐ)", 0, 670 + 5 * passive_level, 3, cooltime = -1, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20)).setV(vEhc, 6, 2, False).wrap(core.DamageSkillWrapper)
SummonShootingShotgun = core.DamageSkill("์๋จผ ์ํ
 ์ท๊ฑด", CANCEL_TIME, 510 + 5 * passive_level, 7, cooltime = 5000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20, pdamage_indep = 15)).setV(vEhc, 3, 2, False).wrap(core.DamageSkillWrapper)
SummonSlachingKnife = core.DamageSkill("์๋จผ ์ฌ๋์ฑ ๋์ดํ", CANCEL_TIME, 435 + 5 * passive_level, 8, cooltime = 10000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20, pdamage_indep = 15)).setV(vEhc, 6, 2, False).wrap(core.DamageSkillWrapper)
SummonSlachingKnife_Horror = core.BuffSkill("์๋จผ ์ฌ๋์ฑ ๋์ดํ(๊ณตํฌ)", 0, 10000, armor_ignore = 30, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage, cooltime = -1).wrap(core.BuffSkillWrapper)
SummonReleasingBoom = core.DamageSkill("์๋จผ ๋ฆด๋ฆฌ์ง ๋ด", CANCEL_TIME, 0, 0, cooltime = 8000, red = True).setV(vEhc, 3, 2, False).wrap(core.DamageSkillWrapper)
SummonReleasingBoom_Explode = core.DamageSkill("์๋จผ ๋ฆด๋ฆฌ์ง ๋ด(ํญ๋ฐ)", 0, 535 + 5 * passive_level, 6, cooltime = -1, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20)).setV(vEhc, 3, 2, False).wrap(core.DamageSkillWrapper)
SummonStrikingBrick = core.DamageSkill("์๋จผ ์คํธ๋ผ์ดํน ๋ธ๋ฆญ", 390+CANCEL_TIME-STROKE1_CANCEL_TIME, 485 + 8*self.combat, 7, cooltime = 8000, red = True, modifier = core.CharacterModifier(boss_pdamage = 20, pdamage = 20, pdamage_indep = 15)).setV(vEhc, 2, 2, False).wrap(core.DamageSkillWrapper)
# ๊ฐ์ ์ด๋ 390ms, ํญ๋ฐ 360ms(์บ์ฌ๊ฐ๋ฅ), ๊ฐ์ ์ด๋ ๋์ค 1ํ ์ฌ์ฉ ๊ฐ๋ฅํ๋ฏ๋ก 1ํ ๋๋ ์ด๋งํผ ๋บ.
SummonBeatingNeedlebat_1 = core.DamageSkill("์๋จผ ๋นํ
 ๋๋ค๋ฐฐํธ(1ํ)", 360, 450 + 10 * self.combat, 6, modifier = core.CharacterModifier(pdamage = 40 + 20, boss_pdamage = 20, pdamage_indep = 15), cooltime = 12000, red = True).setV(vEhc, 1, 2, False).wrap(core.DamageSkillWrapper)
SummonBeatingNeedlebat_2 = core.DamageSkill("์๋จผ ๋นํ
 ๋๋ค๋ฐฐํธ(2ํ)", 420, 555 + 10 * self.combat, 7, modifier = core.CharacterModifier(pdamage = 40 + 20, boss_pdamage = 20)).setV(vEhc, 1, 2, False).wrap(core.DamageSkillWrapper)
SummonBeatingNeedlebat_3 = core.DamageSkill("์๋จผ ๋นํ
 ๋๋ค๋ฐฐํธ(3ํ)", CANCEL_TIME, 715 + 10 * self.combat, 8, modifier = core.CharacterModifier(pdamage = 50 + 20, boss_pdamage = 20)).setV(vEhc, 1, 2, False).wrap(core.DamageSkillWrapper)
SummonBeatingNeedlebat_Honmy = core.BuffSkill("์๋จผ ๋นํ
 ๋๋ค๋ฐฐํธ(ํผ๋ฏธ)", 0, 15000, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage, cooltime = -1).wrap(core.BuffSkillWrapper)
# 5์ฐจ
VenomBurst = core.DotSkill("๋ฒ ๋ ๋ฒ์คํธ", 0, 1000, 160+6*vEhc.getV(4,4), 1, 99999999).isV(vEhc,4,4).wrap(core.DotSkillWrapper)
VenomBurst_Poison = core.BuffSkill("๋ฒ ๋ ๋ฒ์คํธ(์ค๋
)", 0, 99999999, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage, cooltime = -1).isV(vEhc,4,4).wrap(core.BuffSkillWrapper)
ReadyToDie = thieves.ReadyToDieWrapper(vEhc, 2, 3)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
NovaGoddessBless = nova.NovaGoddessBlessWrapper(vEhc, 0, 0)
ChainArts_Fury = core.BuffSkill("์ฒด์ธ์์ธ :ํจ๋ฆฌ", 420, (35+vEhc.getV(0,0))*1000, cooltime = (180-vEhc.getV(0,0))*1000, red = True).isV(vEhc,0,0).wrap(core.BuffSkillWrapper)
ChainArts_Fury_Damage = core.DamageSkill("์ฒด์ธ์์ธ :ํจ๋ฆฌ(๊ณต๊ฒฉ)", 0, 250+10*vEhc.getV(0,0), 6, cooltime = 600).isV(vEhc,0,0).wrap(core.DamageSkillWrapper)
AD_Odnunce = core.SummonSkill("A.D ์ค๋๋์ค", 360, 270, 225+9*vEhc.getV(1,1), 5, 10000, cooltime = 25000, red = True).isV(vEhc,1,1).wrap(core.SummonSkillWrapper) # 37*5ํ
AD_Odnunce_Final = core.DamageSkill("A.D ์ค๋๋์ค(๋งํ)", 0, 750+30*vEhc.getV(1,1), 8, cooltime = -1).isV(vEhc,1,1).wrap(core.DamageSkillWrapper)
ChainArts_Maelstorm = MaelstromWrapper(vEhc, 8)
ChainArts_Maelstorm_Slow = core.BuffSkill("์ฒด์ธ์์ธ :๋ฉ์ผ์คํธ๋กฌ(์ค๋
)", 0, 4000+6000, crit = CheapShotII.crit, crit_damage = CheapShotII.crit_damage, cooltime = -1).isV(vEhc,3,2).wrap(core.BuffSkillWrapper)
WeaponVarietyFinale = core.StackableDamageSkillWrapper(
core.DamageSkill("์จํฐ ๋ฒ๋ผ์ด์ดํฐ ํผ๋ ๋ ", 0, 250+10*vEhc.getV(0, 0), 7*4, cooltime=11000).isV(vEhc, 0, 0),
3
)
WeaponVarietyFinaleTrigger = core.StackSkillWrapper(core.BuffSkill("์จํฐ ๋ฒ๋ผ์ด์ดํฐ ํผ๋ ๋ (์จ๋ฒํ์)", 0, 99999999), 4)
###### Skill Wrapper ######
#๊ธฐ๋ณธ ์ฐ๊ณ ์ฐ๊ฒฐ
#ChainArts_ToughHustleInit.onAfter(ChainArts_ToughHustle) ํฐํํ์ฌ ๋ฏธ์ฌ์ฉ
ChainArts_Takedown_Init.onBefore(ChainArts_Takedown_Bind)
ChainArts_Takedown_Init.onAfter(ChainArts_Takedown_Attack)
ChainArts_Takedown_Init.onAfter(core.RepeatElement(ChainArts_Takedown_Wave, 8))
ChainArts_Takedown_Init.onAfter(ChainArts_Takedown_Final)
SummonThrowingWingdagger.onAfter(SummonThrowingWingdaggerSummon)
SummonThrowingWingdaggerSummon.onAfter(SummonThrowingWingdaggerEnd.controller(330*WINGDAGGER_HIT))
SummonSlachingKnife.onAfter(SummonSlachingKnife_Horror)
SummonReleasingBoom.onAfter(SummonReleasingBoom_Explode.controller(1000)) # 1์ด ํ ํญ๋ฐ
VenomBurst.onAfter(VenomBurst_Poison)
ChainArts_Fury_Use = core.OptionalElement(lambda : ChainArts_Fury_Damage.is_available() and ChainArts_Fury.is_active(), ChainArts_Fury_Damage, name = "์ฒด์ธ์์ธ :ํจ๋ฆฌ ๋ฐ๋์กฐ๊ฑด")
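# The Fury follow-up hit fires only while the Fury buff is active and its own 600ms cooldown is ready.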
AD_Odnunce.onEventEnd(AD_Odnunce_Final)
ChainArts_Maelstorm.onAfter(ChainArts_Maelstorm_Slow)
# ์จํฐ ๋ฒ๋ผ์ด์ดํฐ ํธ์ถ
SummonCuttingSimiter.onAfter(WeaponVariety.stackController("์๋ฏธํฐ"))
SummonScratchingClaw.onAfter(WeaponVariety.stackController("ํด๋ก"))
SummonThrowingWingdaggerSummon.onTick(WeaponVariety.stackController("์๋๊ฑฐ"))
SummonThrowingWingdaggerEnd.onAfter(WeaponVariety.stackController("์๋๊ฑฐ"))
SummonShootingShotgun.onAfter(WeaponVariety.stackController("์ท๊ฑด"))
SummonSlachingKnife.onAfter(WeaponVariety.stackController("๋์ดํ"))
SummonReleasingBoom_Explode.onAfter(WeaponVariety.stackController("๋ด"))
SummonStrikingBrick.onAfter(WeaponVariety.stackController("๋ธ๋ฆญ"))
SummonBeatingNeedlebat_1.onAfter(WeaponVariety.stackController("๋ฐฐํธ"))
SummonBeatingNeedlebat_1.onAfter(SummonBeatingNeedlebat_2)
SummonBeatingNeedlebat_2.onAfter(WeaponVariety.stackController("๋ฐฐํธ"))
SummonBeatingNeedlebat_2.onAfter(SummonBeatingNeedlebat_3)
SummonBeatingNeedlebat_3.onAfter(WeaponVariety.stackController("๋ฐฐํธ"))
SummonBeatingNeedlebat_3.onAfter(SummonBeatingNeedlebat_Honmy)
# ์จํฐ ๋ฒ๋ผ์ด์ดํฐ ํผ๋ ๋
WeaponVarietyFinale.onAfter(WeaponVarietyFinaleTrigger.stackController(-4))
WeaponVarietyAttack.onAfter(WeaponVarietyFinaleTrigger.stackController(1))
WeaponVarietyAttack.onAfter(
core.OptionalElement(lambda: WeaponVarietyFinaleTrigger.judge(4, 1) and WeaponVarietyFinale.is_available(), WeaponVarietyFinale, name="์จ๋ฒํผ ๋ฐ๋์กฐ๊ฑด"))
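# WeaponVarietyFinaleTrigger counts Weapon Variety procs: judge(4, 1) fires the finale once 4 stacks are reached (subject to the finale's own cooldown), and stackController(-4) then empties the counter.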
Reduce2sec = WeaponVarietyFinale.controller(2000, 'reduce_cooltime')
Reduce1sec = WeaponVarietyFinale.controller(1000, 'reduce_cooltime')
ChainArts_Fury_Damage.onAfter(Reduce1sec)
ChainArts_Crush.onAfter(Reduce2sec)
ChainArts_Takedown_Init.onAfter(Reduce2sec)
ChainArts_Takedown_Attack.onAfter(Reduce2sec)
ChainArts_Takedown_Wave.onAfter(Reduce2sec)
ChainArts_Takedown_Final.onAfter(Reduce2sec)
# ์นด๋ฐ๋ ๋ ์ฌ์ดํด๋ค.
# ํํ
NormalAttack = core.DamageSkill("ํํ", 0, 0, 0).wrap(core.DamageSkillWrapper)
for i in [ChainArts_Stroke_1, ChainArts_Stroke_2]:
NormalAttack.onAfter(i)
# ์ท๊ฑด-ํด๋ก
ShootgunClawCombo = comboBuilder("์ท๊ฑด-ํด๋ก", [ChainArts_Stroke_1_Cancel, SummonShootingShotgun, ChainArts_Stroke_1, ChainArts_Stroke_2_Cancel, SummonScratchingClaw])
# ์๋ฏธํฐ - ์ฒด์ด์ค
SimiterChaseCombo = comboBuilder("์๋ฏธํฐ-์ฒด์ด์ค", [ChainArts_Stroke_1_Cancel, SummonCuttingSimiter, ChainArts_Chais])
# ๋์ดํ
KnifeCombo = comboBuilder("๋์ดํ", [ChainArts_Stroke_1_Cancel, SummonSlachingKnife])
# ๋ด-๋ธ๋ฆญ
BommBrickCombo = comboBuilder("๋ด๋ธ๋ฆญ", [ChainArts_Stroke_1_Cancel, SummonReleasingBoom, ChainArts_Stroke_1_Cancel, SummonStrikingBrick])
# ์๋๊ฑฐ
WingDaggerCombo = comboBuilder("์๋๊ฑฐ", [ChainArts_Stroke_1_Cancel, SummonThrowingWingdagger])
# ๋ฐฐํธ
BatCombo = comboBuilder("๋ฐฐํธ", [ChainArts_Stroke_1_Cancel, SummonBeatingNeedlebat_1])
# ๋ฉ์ผ์คํธ๋กฌ
MaleStromCombo = core.DamageSkill("๋ฉ์ผ์คํธ๋กฌ", 0, 0, 0).wrap(core.DamageSkillWrapper)
for i in [ChainArts_Stroke_1, ChainArts_Stroke_2_Cancel, ChainArts_Maelstorm]:
MaleStromCombo.onAfter(i)
for c in [core.ConstraintElement('๋ฉ์ผ์คํธ๋กฌ', ChainArts_Maelstorm, ChainArts_Maelstorm.check_use)]:
MaleStromCombo.onConstraint(c)
# ์ฒด์ธ์์ธ - ํจ๋ฆฌ ์ฐ๋
# TODO: ํจ๋ฆฌ, ํ๋กํ์
๋ ์ถ๊ฐํ ๋ฐ๋์ ํฐํํ์ฌ ์ถ๊ฐ
for s in [ChainArts_Stroke_1, ChainArts_Stroke_2, ChainArts_Stroke_1_Cancel, ChainArts_Stroke_2_Cancel,
SummonCuttingSimiter, SummonScratchingClaw, SummonShootingShotgun, SummonSlachingKnife, ChainArts_Chais, SummonThrowingWingdaggerEnd,
ChainArts_Takedown_Init, ChainArts_Takedown_Attack, ChainArts_Takedown_Wave, ChainArts_Takedown_Final, ChainArts_Crush,
SummonReleasingBoom, SummonStrikingBrick, SummonBeatingNeedlebat_1, SummonBeatingNeedlebat_2, SummonBeatingNeedlebat_3, MirrorBreak]:
s.onAfter(ChainArts_Fury_Use)
for s in [SummonThrowingWingdaggerSummon, ChainArts_Maelstorm]:
s.onTick(ChainArts_Fury_Use)
# ํ๋กํ์
๋ ์์ด์ ํธ ์ถ๊ฐํ
for s in [ChainArts_Stroke_1, ChainArts_Stroke_2, ChainArts_Stroke_1_Cancel, ChainArts_Stroke_2_Cancel,
SummonCuttingSimiter, SummonScratchingClaw, SummonShootingShotgun, SummonSlachingKnife, ChainArts_Chais, SummonThrowingWingdaggerEnd,
SummonReleasingBoom, SummonStrikingBrick, SummonBeatingNeedlebat_1, SummonBeatingNeedlebat_2, SummonBeatingNeedlebat_3, ChainArts_Crush,
ChainArts_Takedown_Init, ChainArts_Takedown_Attack, ChainArts_Takedown_Wave, ChainArts_Takedown_Final,
ChainArts_Maelstorm, ChainArts_Fury_Damage]:
s.onAfter(ProfessionalAgent_Attack)
for s in [SummonThrowingWingdaggerSummon]:
s.onTick(ProfessionalAgent_Attack)
for s in [ChainArts_Fury_Damage, SummonShootingShotgun, SummonScratchingClaw,
SummonCuttingSimiter, SummonSlachingKnife,
SummonReleasingBoom, SummonStrikingBrick,
SummonBeatingNeedlebat_1, SummonThrowingWingdagger, ChainArts_Maelstorm, WeaponVarietyAttack, WeaponVarietyFinale]:
s.protect_from_running()
return(NormalAttack,
[globalSkill.maple_heros(chtr.level, name = "<NAME>", combat_level=self.combat), globalSkill.useful_sharp_eyes(), globalSkill.useful_combat_orders(),
WeaponVariety, Booster, SpecialPotion, ProfessionalAgent,
ReadyToDie, ChainArts_Fury, NovaGoddessBless,
SummonSlachingKnife_Horror, SummonBeatingNeedlebat_Honmy, VenomBurst_Poison, ChainArts_Maelstorm_Slow,
globalSkill.soul_contract(), CheapShotIIBleed, CheapShotIIBleedBuff, CheapShotIIAdventureMageBuff] +\
[SummonReleasingBoom_Explode, SummonThrowingWingdaggerEnd, AD_Odnunce_Final] +\
[WingDaggerCombo, BatCombo, BommBrickCombo, ShootgunClawCombo, SimiterChaseCombo, KnifeCombo, MaleStromCombo, ChainArts_Crush, MirrorBreak, MirrorSpider] +\
[WeaponVarietyAttack, SummonThrowingWingdaggerSummon, VenomBurst, AD_Odnunce, ChainArts_Maelstorm] +\
[ChainArts_Fury_Damage, WeaponVarietyFinale, SummonShootingShotgun, SummonScratchingClaw,
SummonCuttingSimiter, SummonSlachingKnife,
SummonReleasingBoom, SummonStrikingBrick,
SummonBeatingNeedlebat_1, SummonThrowingWingdagger] +\
[NormalAttack])
```
#### File: dpmModule/jobs/cannonshooter.py
```python
from ..kernel import core
from ..character import characterKernel as ck
from ..status.ability import Ability_tool
from ..execution.rules import RuleSet, ConditionRule
from . import globalSkill
from .jobbranch import pirates
from .jobclass import adventurer
from . import jobutils
from math import ceil
from typing import Any, Dict
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.jobtype = "str"
self.jobname = "์บ๋
ผ์ํฐ"
self.vEnhanceNum = 16
self.ability_list = Ability_tool.get_ability_set(
"boss_pdamage", "crit", "reuse"
)
self.preEmptiveSkills = 2
def get_ruleset(self):
def cannonball_rule(soul_contract):
if soul_contract.is_active():
return True
if soul_contract.is_cooltime_left(50000, -1):
return False
return True
ruleset = RuleSet()
ruleset.add_rule(
ConditionRule("๋น
 ํด์ฆ ๊ธฐ๊ฐํฑ ์บ๋
ผ๋ณผ", "์์ธ ์ปจํธ๋ํธ", cannonball_rule),
RuleSet.BASE,
)
return ruleset
def get_modifier_optimization_hint(self):
return core.CharacterModifier(pdamage=66, crit_damage=6, armor_ignore=30)
def get_passive_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
BuildupCannon = core.InformedCharacterModifier("๋น๋์
 ์บ๋
ผ", att=20)
CriticalFire = core.InformedCharacterModifier(
"ํฌ๋ฆฌํฐ์ปฌ ํ์ด์ด", crit=20, crit_damage=5
)
PirateTraining = core.InformedCharacterModifier(
"ํ์ด๋ ํธ๋ ์ด๋", stat_main=30, stat_sub=30
)
MonkeyWavePassive = core.InformedCharacterModifier("๋ชฝํค ์จ์ด๋ธ(ํจ์๋ธ)", crit=20)
OakRuletPassive = core.InformedCharacterModifier(
"์คํฌํต ๋ฃฐ๋ (ํจ์๋ธ)", pdamage_indep=10
)
ReinforceCannon = core.InformedCharacterModifier("๋ฆฌ์ธํฌ์ค ์บ๋
ผ", att=40)
PirateSpirit = core.InformedCharacterModifier(
"ํ์ด๋ ์คํผ๋ฆฟ", boss_pdamage=40 + self.combat
)
OverburningCannon = core.InformedCharacterModifier(
"์ค๋ฒ๋ฒ๋ ์บ๋
ผ",
pdamage_indep=30 + passive_level,
armor_ignore=20 + passive_level // 2,
)
LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
return [
BuildupCannon,
CriticalFire,
PirateTraining,
MonkeyWavePassive,
OakRuletPassive,
ReinforceCannon,
PirateSpirit,
OverburningCannon,
LoadedDicePassive,
]
def get_not_implied_skill_list(
self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]
):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("๋ฌด๊ธฐ์์", pdamage_indep=50)
Mastery = core.InformedCharacterModifier(
"์๋ จ๋", pdamage_indep=-7.5 + 0.5 * ceil(passive_level / 2)
)
return [WeaponConstant, Mastery]
def generate(self, vEhc, chtr: ck.AbstractCharacter, options: Dict[str, Any]):
"""
ํ์ดํผ : ๋ชฝํคํธ์์ค-์คํ๋ฆฟ, ์ธํธ์ค, ์บ๋
ผ๋ฒ์คํฐ - ๋ฆฌ์ธํฌ์ค, ๋ณด๋์ค ์ดํ.
๋กค๋ง์บ๋
ผ๋ ์ธ๋ณด์ฐ 26ํ
์ฝ์ฝ๋ณผ 6์ด
์ด์จ๋ฐค 5ํ
์ฝ๊ฐ ์์:
๋ฒ์คํฐ-์ํฌํธ-๋ค์๊ธฐ-๋กค์บ
"""
COCOBALLHIT = options.get("cocoball_hit", 27)
ICBMHIT = 6
passive_level = chtr.get_base_modifier().passive_level + self.combat
# Buff skills
Booster = core.BuffSkill("๋ถ์คํฐ", 0, 200 * 1000).wrap(core.BuffSkillWrapper)
Buckshot = core.BuffSkill("๋ฒ
์ท", 0, 180000).wrap(core.BuffSkillWrapper)
LuckyDice = (
core.BuffSkill(
"๋ก๋๋ ๋ค์ด์ค",
delay=0,
remain=180 * 1000,
pdamage=20 # ๋ก๋๋ ๋ฐ๋ฏธ์ง ๊ณ ์ .
+ 10 / 6
+ 10 / 6 * (5 / 6 + 1 / 11) * (10 * (5 + passive_level) * 0.01),
)
.isV(vEhc, 3, 4)
.wrap(core.BuffSkillWrapper)
)
MonkeyWave = core.DamageSkill(
"๋ชฝํค ์จ์ด๋ธ",
delay=810,
damage=860,
hit=1,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyWaveBuff = core.BuffSkill(
"๋ชฝํค ์จ์ด๋ธ(๋ฒํ)",
delay=0,
remain=30000,
cooltime=-1,
crit_damage=5,
).wrap(core.BuffSkillWrapper)
MonkeyFurious = core.DamageSkill(
"๋ชฝํค ํจ๋ฆฌ์ด์ค",
delay=720,
damage=180,
hit=3,
cooltime=30 * 1000,
).wrap(core.DamageSkillWrapper)
MonkeyFuriousBuff = core.BuffSkill(
"๋ชฝํค ํจ๋ฆฌ์ด์ค(๋ฒํ)",
delay=0,
remain=30000,
cooltime=-1,
pdamage=40,
).wrap(core.BuffSkillWrapper)
MonkeyFuriousDot = core.DotSkill(
"๋ชฝํค ํจ๋ฆฌ์ด์ค(๋ํธ)",
summondelay=0,
delay=1000,
damage=200,
hit=1,
remain=30000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
OakRoulette = core.BuffSkill(
"์คํฌํต ๋ฃฐ๋ ",
delay=840,
remain=180000,
rem=True,
cooltime=180000,
crit_damage=1.25,
).wrap(core.BuffSkillWrapper)
OakRuletDOT = core.DotSkill(
"์คํฌํต ๋ฃฐ๋ (๋ํธ)",
summondelay=0,
delay=1000,
damage=50,
hit=1,
remain=5000,
cooltime=-1,
).wrap(core.DotSkillWrapper)
MonkeyMagic = core.BuffSkill(
"ํ์ดํผ ๋ชฝํค ์คํ ",
delay=0,
remain=180000,
rem=True,
stat_main=60 + passive_level,
stat_sub=60 + passive_level,
).wrap(core.BuffSkillWrapper)
# Damage Skills
CannonBuster = (
core.DamageSkill(
"์บ๋
ผ ๋ฒ์คํฐ",
delay=690,
damage=(750 + 5 * self.combat) * 0.45, # BuckShot
hit=3 * (4 + 1),
modifier=core.CharacterModifier(
crit=15 + ceil(self.combat / 2),
armor_ignore=20 + self.combat // 2,
pdamage=20,
),
)
.setV(vEhc, 0, 2, True)
.wrap(core.DamageSkillWrapper)
)
# Summon Skills
SupportMonkeyTwins = (
core.SummonSkill(
"์ํฌํธ ๋ชฝํค ํธ์์ค",
summondelay=720,
delay=930,
damage=(295 + 8 * self.combat) * 0.6, # Split Damage
hit=(1 + 1) * (2 + 1), # Split Damage, Enhance
remain=60000 + 2000 * self.combat,
rem=True,
)
.setV(vEhc, 1, 2, False)
.wrap(core.SummonSkillWrapper)
)
# Hyper
RollingCannonRainbow = (
core.SummonSkill(
"๋กค๋ง ์บ๋
ผ ๋ ์ธ๋ณด์ฐ",
summondelay=480,
delay=12000 / 26,
damage=600,
hit=3,
remain=12000,
cooltime=90000,
)
.setV(vEhc, 3, 2, True)
.wrap(core.SummonSkillWrapper)
)
EpicAdventure = core.BuffSkill(
"์ํฝ ์ด๋๋ฒค์ฒ",
delay=0,
remain=60000,
cooltime=120000,
pdamage=10,
).wrap(core.BuffSkillWrapper)
# V skills
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
PirateFlag = adventurer.PirateFlagWrapper(vEhc, 4, 3, chtr.level)
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
# ์ฟจํ์๋ง๋ค ์ฌ์ฉ
# ํ์์๋น ๋์ 27ํ ์ถฉ๋
BFGCannonball = core.StackableSummonSkillWrapper(
core.SummonSkill(
"๋น
 ํด์ฆ ๊ธฐ๊ฐํฑ ์บ๋
ผ๋ณผ",
summondelay=600,
delay=210,
damage=(450 + 15 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=4 * 3,
remain=210 * COCOBALLHIT,
cooltime=25000,
).isV(vEhc, 0, 0),
max_stack=3,
)
ICBM = (
core.DamageSkill(
"ICBM",
delay=1140,
damage=(800 + 32 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=5 * ICBMHIT * 3,
cooltime=30000,
red=True,
)
.isV(vEhc, 1, 1)
.wrap(core.DamageSkillWrapper)
)
ICBMDOT = (
core.SummonSkill(
"ICBM(์ฅํ)",
summondelay=0,
delay=15000 / 27, # 27ํ
damage=(500 + 20 * vEhc.getV(1, 1)) * 0.45, # BuckShot
hit=1 * 3,
remain=15000,
cooltime=-1,
)
.isV(vEhc, 1, 1)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Cannon = (
core.SummonSkill(
"์คํ์
 ๋ชฝํค ์์ค์ฝํธ",
summondelay=780,
delay=1500,
damage=300 + 12 * vEhc.getV(2, 2),
hit=4 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=120000,
red=True,
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
SpecialMonkeyEscort_Bomb = (
core.SummonSkill(
"์คํ์
 ๋ชฝํค ์์ค์ฝํธ(ํญํ)",
summondelay=0,
delay=5000,
damage=450 + 18 * vEhc.getV(2, 2),
hit=7 * 3,
remain=(30 + vEhc.getV(2, 2) // 2) * 1000,
cooltime=-1,
modifier=core.CharacterModifier(armor_ignore=100),
)
.isV(vEhc, 2, 2)
.wrap(core.SummonSkillWrapper)
)
FullMaker = (
core.SummonSkill(
"ํ ๋ฉ์ด์ปค",
summondelay=720,
delay=360,
damage=(700 + 28 * vEhc.getV(0, 0)) * 0.45, # BuckShot
hit=3 * 3,
remain=360 * 20 - 1,
cooltime=60000,
red=True,
)
.isV(vEhc, 0, 0)
.wrap(core.SummonSkillWrapper)
)
### build graph relationships
MonkeyWave.onAfter(MonkeyWaveBuff)
MonkeyFurious.onAfters([MonkeyFuriousBuff, MonkeyFuriousDot])
CannonBuster.onAfter(OakRuletDOT)
BFGCannonball.onAfter(OakRuletDOT)
ICBM.onAfter(OakRuletDOT)
ICBM.onAfter(ICBMDOT)
SpecialMonkeyEscort_Cannon.onJustAfter(SpecialMonkeyEscort_Bomb)
return (
CannonBuster,
[
globalSkill.maple_heros(chtr.level, combat_level=self.combat),
globalSkill.useful_sharp_eyes(),
globalSkill.useful_combat_orders(),
globalSkill.useful_wind_booster(),
Booster,
OakRoulette,
Buckshot,
MonkeyMagic,
LuckyDice,
globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat),
EpicAdventure,
Overdrive,
PirateFlag,
globalSkill.soul_contract(),
]
+ [
SpecialMonkeyEscort_Cannon,
BFGCannonball,
FullMaker,
RollingCannonRainbow,
SupportMonkeyTwins,
]
+ [MonkeyWave, MonkeyFurious, ICBM, MirrorBreak]
+ [
SpecialMonkeyEscort_Bomb,
MirrorSpider,
OakRuletDOT,
MonkeyFuriousDot,
MonkeyWaveBuff,
MonkeyFuriousBuff,
ICBMDOT,
] # Not used from scheduler
+ [CannonBuster],
)
```
#### File: dpmModule/jobs/xenon.py
```python
from ..kernel import core
from ..character import characterKernel as ck
from ..execution.rules import RuleSet, ConcurrentRunRule, ConditionRule
from ..status.ability import Ability_tool
from . import globalSkill
from .jobbranch import thieves, pirates
from .jobclass import resistance
from . import jobutils
from math import ceil
from typing import Any, Dict
'''
Advisor: Monolith, ๋ชฐ๋ผ#4508
'''
# TODO: ํ์ดํผ์คํฏ์ผ๋ก ์คํ ์ค 10% ํ๋ณด ํ์
# ๋์ ํ๋ญ์ ๋
ผ ๊ฐ์
# ์ด์ง์ค ์์คํ
 ๋ฏธ์ฌ์ฉ
class SupplyStackWrapper(core.StackSkillWrapper):
def __init__(self, skill, amaranth_generator):
super(SupplyStackWrapper, self).__init__(skill, 20)
self.stack = 20
self.set_name_style("์ํ๋ผ์ด ๋ณํ : %d")
self.amaranth_generator = amaranth_generator
# ์๋ง๋์ค ํ์ฑํ์ ์๋์ง ์๋ชจ ์์
def vary(self, d):
delta = d
if self.amaranth_generator.is_active():
delta = max(0, delta)
result = super().vary(delta)
return result
def get_modifier(self):
'''
์ํ๋ฌ์ค ์ํ๋ผ์ด: ์ํ๋ฌ์ค ์๋์ง 1๊ฐ ๋น ๋ชจ๋  ๋ฅ๋ ฅ์น 1%๋งํผ ์ฆ๊ฐ, 20 ์ด๊ณผ์ ์ด๊ณผ ์๋์ง๋น ์ต์ข
 ๋ฐ๋ฏธ์ง 1% ์ฆ๊ฐ
'''
return core.CharacterModifier(pstat_main=self.stack, pstat_sub=self.stack, pdamage_indep=max(0, self.stack-20))
def begin_overload(self):
self._max = 40
return self._result_object_cache
def beginOverloadMode(self):
return core.TaskHolder(core.Task(self, self.begin_overload), name="์ค๋ฒ๋ก๋ ๋ชจ๋ ์์")
def end_overload(self):
self.stack = min(20, self.stack)
self._max = 20
return self._result_object_cache
def endOverloadMode(self):
return core.TaskHolder(core.Task(self, self.end_overload), name="์ค๋ฒ๋ก๋ ๋ชจ๋ ์ข
๋ฃ")
class JobGenerator(ck.JobGenerator):
def __init__(self):
super(JobGenerator, self).__init__()
self.jobtype = "luk"
self.jobname = "์ ๋
ผ"
self.vEnhanceNum = None
self.ability_list = Ability_tool.get_ability_set('boss_pdamage', 'crit', 'buff_rem')
self.preEmptiveSkills = 2
def get_ruleset(self):
ruleset = RuleSet()
for skill in ['๋ฉ๊ฐ ์ค๋งค์
(๊ฐ์)', '์์ธ ์ปจํธ๋ํธ', '๋ ๋ ํฌ ๋ค์ด', '์ค๋ฒ ๋๋ผ์ด๋ธ']:
ruleset.add_rule(ConcurrentRunRule(skill, 'ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ์ตํฉ'), RuleSet.BASE)
# ruleset.add_rule(ConditionRule('ํฌํค ๋ ์ด', 'ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ์ตํฉ', lambda sk : sk.is_active() or sk.is_cooltime_left(690*15, 1)), RuleSet.BASE)
ruleset.add_rule(ConcurrentRunRule('๋ฉ๊ฐ ์ค๋งค์
(๊ฐ์)', '์คํ์ธ ์ฝ๋'), RuleSet.BASE)
ruleset.add_rule(ConditionRule('์์คํธ๋ผ ์ํ๋ผ์ด', '์ํ๋ฌ์ค ์ํ๋ผ์ด', lambda sk : sk.stack < sk._max - 10), RuleSet.BASE)
return ruleset
def get_modifier_optimization_hint(self):
return core.CharacterModifier(pstat_main=20, pstat_sub=20)
def get_passive_skill_list(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
passive_level = chtr.get_base_modifier().passive_level + self.combat
Multilateral1 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด I", pdamage=3)
Multilateral2 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด II", pdamage=5)
Multilateral3 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด III", pdamage=7)
Multilateral4 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด IV", pdamage=10)
Multilateral5 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด V", pdamage=10)
Multilateral6 = core.InformedCharacterModifier("๋ฉํฐ๋ํฐ๋ด VI", pdamage=5)
Multilateral = [Multilateral1, Multilateral2, Multilateral3, Multilateral4, Multilateral5, Multilateral6]
LinearPerspective = core.InformedCharacterModifier("๋ฆฌ๋์ด ํผ์คํํฐ๋ธ", crit=40)
MinoritySupport = core.InformedCharacterModifier("๋ง์ด๋๋ฆฌํฐ ์ํฌํธ", stat_main=20, stat_sub=20)
XenonMastery = core.InformedCharacterModifier("์ ๋
ผ ๋ง์คํฐ๋ฆฌ", att=20)
HybridDefensesPassive = core.InformedCharacterModifier("๋์ผ๋ธ๋ฆฌ๋ ๋ํ์๋ธ(ํจ์๋ธ)", stat_main=10, stat_sub=10)
XenonExpert = core.InformedCharacterModifier("์ ๋
ผ ์์คํผํธ", att=30 + passive_level, crit_damage=8)
OffensiveMatrix = core.InformedCharacterModifier("์คํ์๋ธ ๋งคํธ๋ฆญ์ค", armor_ignore=30 + passive_level)
LoadedDicePassive = pirates.LoadedDicePassiveWrapper(vEhc, 3, 4)
ReadyToDiePassive = thieves.ReadyToDiePassiveWrapper(vEhc, 2, 2)
return Multilateral + [LinearPerspective, MinoritySupport, XenonMastery, HybridDefensesPassive, XenonExpert, OffensiveMatrix,
LoadedDicePassive, ReadyToDiePassive]
def get_not_implied_skill_list(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
passive_level = chtr.get_base_modifier().passive_level + self.combat
WeaponConstant = core.InformedCharacterModifier("๋ฌด๊ธฐ์์", pdamage_indep=50)
JobConstant = core.InformedCharacterModifier("์ง์
์์", pdamage_indep=-12.5)
Mastery = core.InformedCharacterModifier("์๋ จ๋", pdamage_indep=-5+0.5*ceil(passive_level/2))
return [WeaponConstant, JobConstant, Mastery]
def generate(self, vEhc, chtr : ck.AbstractCharacter, options: Dict[str, Any]):
'''
TODO: ๋์ฌ์ดํด ์ต์ ํ, return๋ฌธ ์ ๋ฆฌ
ํ์ดํผ ์คํฌ: ํ๋ก๊ทธ๋จ 3์ข
, ํผ์ง๋กญ ๋์ฆ + ๋ฐฉ๋ฌด
'''
# Buff skills
# ํซ๋ฒํ: ์ธํด๋ผ์ธ, ์ํผ์
์, ๋ถ์คํฐ
InclinePower = core.BuffSkill("์ธํด๋ผ์ธ ํ์", 0, 240000, att=30, rem=True).wrap(core.BuffSkillWrapper)
EfficiencyPipeLine = core.BuffSkill("์ํผ์
์ ํ์ดํ๋ผ์ธ", 0, 240000, rem=True).wrap(core.BuffSkillWrapper)
Booster = core.BuffSkill("์ ๋
ผ ๋ถ์คํฐ", 0, 240000, rem=True).wrap(core.BuffSkillWrapper)
HybridDefenses = core.BuffSkill("๋์ผ๋ธ๋ฆฌ๋ ๋ํ์๋ธ", 0, 999999999).wrap(core.BuffSkillWrapper)
VirtualProjection = core.BuffSkill("๋ฒ์ถ์ผ ํ๋ก์ ์
", 0, 999999999).wrap(core.BuffSkillWrapper)
# ์์ปด์์ ๋๋ ์ด ์์
ExtraSupply = core.BuffSkill("์์คํธ๋ผ ์ํ๋ผ์ด", 0, 1, cooltime=30000, red=True).wrap(core.BuffSkillWrapper)
OOPArtsCode = core.BuffSkill("์คํ์ธ ์ฝ๋", 990, (30+self.combat//2)*1000, pdamage_indep=25+self.combat//2, boss_pdamage=30+self.combat).wrap(core.BuffSkillWrapper)
# Damage skills
# ๋ก์ผ๊ฐํ ์ ์ฉ๋จ
PinpointRocket = core.DamageSkill("ํํฌ์ธํธ ๋ก์ผ", 0, 50+40+40+100, 4, cooltime=2000).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
# ํ์๋ Skill Wrapper ์ชฝ์ผ๋ก ์ด๊ด
# AegisSystem = core.DamageSkill("์ด์ง์ค ์์คํ
", 0, 120, 1, modifier=core.CharacterModifier(pdamage=20+passive_level//3), cooltime=1500).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
# 30%ํ๋ฅ ๋ก ์ค์ฒฉ ์์, 3์ค์ฒฉ ์์ ํ ๊ณต๊ฒฉ์ ํฐ์ง๋ฉด์ ์ฌ๋ผ์ง๋๋ก
Triangulation = core.DamageSkill("ํธ๋ผ์ด์ต๊ธ ํฌ๋ฉ์ด์
", 0, 340, 3).setV(vEhc, 0, 3, True).wrap(core.DamageSkillWrapper)
PurgeSnipe = core.DamageSkill("ํผ์ง๋กญ ๋งค์ค์ปค๋ ์ด๋ : ์ ๊ฒฉ", 690, 345 + 2*self.combat, 7, modifier=core.CharacterModifier(armor_ignore=30 + self.combat) + core.CharacterModifier(pdamage=20, armor_ignore=10)).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
# ์ญ์ฅ ๊ธฐ์ค
# ํ์ดํผ 3์ข
 ์ ์ฉ
Hologram_Penetrate = core.SummonSkill("ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ๊ดํต", 720, 30000/116, 213+3*self.combat, 1, 20000+10000, cooltime=30000-1000*ceil(self.combat/3), modifier=core.CharacterModifier(pdamage=10), red=True).setV(vEhc, 0, 2, True).wrap(core.SummonSkillWrapper)
Hologram_ForceField = core.SummonSkill("ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ์ญ์ฅ", 720, 30000/64, 400+5*self.combat, 1, 20000+10000, cooltime=30000-1000*ceil(self.combat/3), modifier=core.CharacterModifier(pdamage=10), red=True).setV(vEhc, 0, 2, True).wrap(core.SummonSkillWrapper)
'''
BladeDancingPrepare = core.DamageSkill("๋ธ๋ ์ด๋ ๋์ฑ (์ค๋น)", 720 + 420, 0, 0).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
BladeDancing = core.DamageSkill("๋ธ๋ ์ด๋ ๋์ฑ", 480, 140+4*self.combat, 1).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
BladeDancingEnd = core.DamageSkill("๋ธ๋ ์ด๋ ๋์ฑ(์ข
๋ฃ)", 300, 0, 0).setV(vEhc, 0, 2, True).wrap(core.DamageSkillWrapper)
'''
# Hyper skills
AmaranthGenerator = core.BuffSkill("์๋ง๋์ค ์ ๋ค๋ ์ดํฐ", 900, 10000, cooltime=90000, rem=False).wrap(core.BuffSkillWrapper) # ์๋์ง ์ต๋์น, 10์ด๊ฐ ์๋์ง ์๋ชจ ์์
MeltDown = core.DamageSkill("๋ฉํธ๋ค์ด ์ต์คํ๋ก์ ", 3150, 900, 6, red=False, cooltime=50000).setV(vEhc, 0, 2, False).wrap(core.DamageSkillWrapper)
MeltDown_Armor = core.BuffSkill("๋ฉํธ๋ค์ด ์ต์คํ๋ก์ (๋ฐฉ๋ฌด)", 0, 10000, armor_ignore=30, rem=False, cooltime=-1).wrap(core.BuffSkillWrapper)
MeltDown_Damage = core.BuffSkill("๋ฉํธ๋ค์ด ์ต์คํ๋ก์ (๋ฐ๋ฏธ์ง)", 0, 25000, pdamage=10, rem=False, cooltime=-1).wrap(core.BuffSkillWrapper)
# V skills
MirrorBreak, MirrorSpider = globalSkill.SpiderInMirrorBuilder(vEhc, 0, 0)
LuckyDice = core.BuffSkill("๋ญํค ๋ค์ด์ค", 0, 180*1000, pdamage=20).isV(vEhc, 3, 4).wrap(core.BuffSkillWrapper)
ResistanceLineInfantry = resistance.ResistanceLineInfantryWrapper(vEhc, 0, 0)
ReadyToDie = thieves.ReadyToDieWrapper(vEhc, 4, 4)
WEAPON_ATT = jobutils.get_weapon_att(chtr)
Overdrive = pirates.OverdriveWrapper(vEhc, 5, 5, WEAPON_ATT)
MegaSmasher = core.DamageSkill("๋ฉ๊ฐ ์ค๋งค์
(๊ฐ์)", 0, 0, 0, cooltime=180000, red=True).wrap(core.DamageSkillWrapper)
MegaSmasherTick = core.DamageSkill("๋ฉ๊ฐ ์ค๋งค์
(ํฑ)", 210, 300+10*vEhc.getV(4, 4), 6).isV(vEhc, 4, 4).wrap(core.DamageSkillWrapper)
OVERLOAD_TIME = 70
OverloadMode = core.BuffSkill("์ค๋ฒ๋ก๋ ๋ชจ๋", 720, OVERLOAD_TIME*1000, cooltime=180000, red=True).wrap(core.BuffSkillWrapper)
# ์ฒซ ๊ณต๊ฒฉ์ ํญ์ 5100ms ํ์ ์์
# ๊ณต๊ฒฉ ์ฃผ๊ธฐ๋ 3600ms~10800ms ์ค ๋๋ค
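# i.e. the random attack period is approximated below by its mean, (3600+10800)/2 = 7200ms.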
OverloadHit = core.SummonSkill("์ค๋ฒ๋ก๋ ๋ชจ๋(์ ๋ฅ)", 0, (3600+10800)/2, 180+7*vEhc.getV(4, 4), 6*4, OVERLOAD_TIME*1000-5100, cooltime=-1).isV(vEhc, 4, 4).wrap(core.SummonSkillWrapper)
OverloadHit_copy = core.SummonSkill("์ค๋ฒ๋ก๋ ๋ชจ๋(์ ๋ฅ)(๋ฒ์ถ์ผ ํ๋ก์ ์
)", 0, (3600+10800)/2, (180+7*vEhc.getV(4, 4))*0.7, 6*4, OVERLOAD_TIME*1000-5100, cooltime=-1).isV(vEhc, 4, 4).wrap(core.SummonSkillWrapper)
# ํ์ดํผ ์ ์ฉ๋จ
Hologram_Fusion = core.SummonSkill("ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ์ตํฉ", 930, (30000+10000)/176, 250+10*vEhc.getV(4, 4), 5, 30000+10000, cooltime=100000, red=True, modifier=core.CharacterModifier(pdamage=10)).isV(vEhc, 4, 4).wrap(core.SummonSkillWrapper)
Hologram_Fusion_Buff = core.BuffSkill("ํ๋ก๊ทธ๋จ ๊ทธ๋ํผํฐ : ์ตํฉ (๋ฒํ)", 0, 30000+10000, pdamage=5+vEhc.getV(4, 4)//2, rem=False).wrap(core.BuffSkillWrapper)
# 30ํ ๋ฐ๋, ๋ฐ์ฌ ๋๋ ์ด ์๋ต, ํผ์ง๋กญ์ผ๋ก ์ถฉ์
PhotonRay = core.BuffSkill("ํฌํค ๋ ์ด", 0, 20000, cooltime=35000, red=True).wrap(core.BuffSkillWrapper)
PhotonRayHit = core.DamageSkill("ํฌํค ๋ ์ด(์บ๋
ผ)", 0, 350+vEhc.getV(4, 4)*14, 4*30).isV(vEhc, 4, 4).wrap(core.DamageSkillWrapper)
###### Skill Wrapper ######
SupplySurplus = SupplyStackWrapper(core.BuffSkill("์ํ๋ฌ์ค ์ํ๋ผ์ด", 0, 999999999), AmaranthGenerator)
SupplyCharger = core.SummonSkill("์ํ๋ผ์ด ์ถฉ์ ", 0, 4000, 0, 0, 9999999999).wrap(core.SummonSkillWrapper)
SupplyCharger.onTick(SupplySurplus.stackController(1))
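# SupplyCharger ticks every 4000ms, passively restoring 1 surplus energy per tick.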
# ํ๋ก๊ทธ๋จ ์คํฌ๋ค์ ์ตํฉ๊ณผ ํจ๊ป ์ฌ์ฉ๋ถ๊ฐ
for skill in [Hologram_ForceField, Hologram_Penetrate]:
skill.onConstraint(core.ConstraintElement(skill._id + '(์ฌ์ฉ ์ ํ)', Hologram_Fusion, Hologram_Fusion.is_not_active))
Hologram_Fusion.onAfter(skill.controller(1, 'set_disabled'))
PinpointRocketOpt = core.OptionalElement(PinpointRocket.is_available, PinpointRocket)
# ํ๋ก๊ทธ๋จ ์ตํฉ ํ์ฑํ์ 10๊ฐ, ์๋๋ฉด 3๊ฐ
# AegisSystemOpt_ = core.OptionalElement(Hologram_Fusion_Buff.is_active, core.RepeatElement(AegisSystem, 10), core.RepeatElement(AegisSystem, 3))
# AegisSystemOpt = core.OptionalElement(AegisSystem.is_active, AegisSystemOpt_)
InclinePower.onAfter(SupplySurplus.stackController(-3))
HybridDefenses.onAfter(SupplySurplus.stackController(-7))
ExtraSupply.onAfter(SupplySurplus.stackController(10))
OOPArtsCode.onAfter(SupplySurplus.stackController(-20))
AmaranthGenerator.onAfter(SupplySurplus.stackController(40))
TriangulationStack = core.StackSkillWrapper(core.BuffSkill("ํธ๋ผ์ด์ต๊ธ ์คํ", 0, 99999999), 3)
TriangulationTrigger = core.OptionalElement(lambda : TriangulationStack.judge(3, 1), Triangulation, TriangulationStack.stackController(0.3))
Triangulation.onAfter(TriangulationStack.stackController(0, dtype='set'))
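# The 30% stack-gain chance is modeled as +0.3 expected stacks per hit; once 3 stacks accumulate, the next hit fires Triangulation and the counter is reset.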
MegaSmasher.onAfter(core.RepeatElement(MegaSmasherTick, 78))
OverloadMode.onAfter(SupplySurplus.beginOverloadMode())
OverloadMode.onEventElapsed(SupplySurplus.endOverloadMode(), OVERLOAD_TIME*1000)
OverloadMode.onEventElapsed(OverloadHit, 5100)
OverloadMode.onEventElapsed(OverloadHit_copy, 5100)
# ํผ์ง๋กญ 15ํ ์ฌ์ฉ ํ ํฌํค๋ ์ด ๋ฐ๋, ์ต์ ํ ํ์
PhotonRay.onEventElapsed(PhotonRayHit, 690*15)
for sk in [PurgeSnipe, MeltDown, MegaSmasherTick]:
sk.onAfter(TriangulationTrigger)
sk.onAfter(PinpointRocketOpt)
jobutils.create_auxilary_attack(sk, 0.7, nametag="(๋ฒ์ถ์ผ ํ๋ก์ ์
)")
MeltDown.onAfter(MeltDown_Armor)
MeltDown.onAfter(MeltDown_Damage)
OverloadHit.onTick(TriangulationTrigger)
OverloadHit.onTick(PinpointRocketOpt)
Hologram_Fusion.onAfter(Hologram_Fusion_Buff)
for sk in [PinpointRocket, Triangulation, MegaSmasherTick, Hologram_Fusion_Buff, PhotonRayHit]:
sk.protect_from_running()
return(PurgeSnipe,
[globalSkill.maple_heros(chtr.level, combat_level=self.combat), globalSkill.useful_sharp_eyes(), globalSkill.useful_combat_orders(),
globalSkill.useful_hyper_body_xenon(), globalSkill.MapleHeroes2Wrapper(vEhc, 0, 0, chtr.level, self.combat), globalSkill.soul_contract(),
SupplySurplus, SupplyCharger, InclinePower, EfficiencyPipeLine, Booster, HybridDefenses, VirtualProjection, ExtraSupply] +
[Hologram_ForceField, AmaranthGenerator, MirrorBreak, MirrorSpider, MegaSmasher, MegaSmasherTick, ResistanceLineInfantry, LuckyDice, ReadyToDie, Overdrive,
OverloadMode, Hologram_Fusion, Hologram_Fusion_Buff, OverloadHit, OverloadHit_copy, PhotonRay, PhotonRayHit, MeltDown, MeltDown_Armor, MeltDown_Damage] +
[PinpointRocket, Triangulation, OOPArtsCode] +
[PurgeSnipe])
```
#### File: Jeongwoo-KGI/maplestory_dpm_calc/dpm_sheet.py
```python
import argparse
from concurrent.futures import ProcessPoolExecutor
from itertools import product
from statistics.preset import get_preset_list
from dpmModule.character.characterTemplate import get_template_generator
from dpmModule.util.dpmgenerator import IndividualDPMGenerator
try:
import pandas as pd
import xlsxwriter
except ImportError:
print("pandas, xlsxwriter ๋ชจ๋์ ์ค์นํด์ผ ํฉ๋๋ค.")
exit()
def get_args():
parser = argparse.ArgumentParser("DPM Sheet argument")
parser.add_argument("--ulevel", type=int, default=8000)
parser.add_argument("--time", type=int, default=1800)
parser.add_argument("--thread", type=int, default=4)
return parser.parse_args()
def test(args):
preset, ulevel, cdr, runtime = args
id, jobname, description, options, alt = preset
template = get_template_generator("high_standard")().get_template(ulevel)
parser = IndividualDPMGenerator(jobname, template)
parser.set_runtime(runtime * 1000)
result = parser.get_detailed_dpm(ulevel=ulevel, cdr=cdr, options=options)
dpm = result["dpm"]
loss = result["loss"]
return jobname, cdr, description, dpm, loss, alt
if __name__ == "__main__":
args = get_args()
ulevel = args.ulevel
tasks = product(get_preset_list(), [ulevel], [0, 2, 4], [args.time])
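# Each task is a (preset, ulevel, cdr, runtime) tuple matching the unpacking in test(); cdr 0/2/4 are the hat cooldown-reduction variants compared in the sheet.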
pool = ProcessPoolExecutor(max_workers=args.thread)
results = pool.map(test, tasks)
df = pd.DataFrame.from_records(
results,
exclude=["loss"],
columns=["์ง์
", "์ฟจ๊ฐ", "๋น๊ณ ", "dpm", "loss", "alt"],
)
# df.to_pickle("cache.pkl")
# df: pd.DataFrame = pd.read_pickle("cache.pkl")
df = df.sort_values(by="dpm", axis=0, ascending=False)
df = df.drop_duplicates(subset=["์ง์
", "๋น๊ณ "]).copy()
df["best"] = df.groupby(["์ง์
"])["dpm"].transform("max")
df = df.sort_values(by=["best", "dpm"], axis=0, ascending=False)
median = df["best"].median()
df["๋ฐฐ์จ"] = df["dpm"] / median
df = df[["์ง์
", "์ฟจ๊ฐ", "๋น๊ณ ", "dpm", "๋ฐฐ์จ", "alt"]]
writer = pd.ExcelWriter("./dpm_sheet.xlsx", engine="xlsxwriter")
df.to_excel(writer, sheet_name="dpm", index=False)
workbook: xlsxwriter.Workbook = writer.book
worksheet: xlsxwriter.workbook.Worksheet = writer.sheets["dpm"]
center_format = workbook.add_format({"align": "center"})
num_format = workbook.add_format({"num_format": "#,##0", "align": "center"})
percent_format = workbook.add_format({"num_format": "0.00%", "align": "center"})
alt_format = workbook.add_format({"font_color": "#808080"})
worksheet.set_column("A:I", None, center_format)
worksheet.set_column("A:A", 15)
worksheet.set_column("B:B", 5)
worksheet.set_column("C:C", 45)
worksheet.set_column("D:D", 18, num_format)
worksheet.set_column("E:E", 10, percent_format)
worksheet.set_column("F:F", None, None, {"hidden": True})
worksheet.conditional_format(
"E2:E55", {"type": "data_bar", "bar_solid": True, "bar_color": "#63C384"}
)
worksheet.conditional_format(
"A2:D55", {"type": "formula", "criteria": "$F2>0", "format": alt_format}
)
writer.close()
```
#### File: maplestory_dpm_calc/statistics/optimization_hint.py
```python
import argparse
import pandas as pd
from dpmModule.character.characterKernel import ItemedCharacter, JobGenerator
from dpmModule.character.characterTemplate import get_template_generator
from dpmModule.jobs import jobMap, weaponList
from dpmModule.kernel import core
from dpmModule.status.ability import Ability_grade
from .loader import load_data
from .preset import get_preset
from .saver import save_data
def get_args():
parser = argparse.ArgumentParser("Optimization hint argument")
parser.add_argument(
"--id", type=str, help="Target preset id to calculate statistics"
)
parser.add_argument("--ulevel", type=int, default=8000)
parser.add_argument("--cdr", type=int, default=0)
parser.add_argument("--time", type=int, default=1800)
parser.add_argument("--task", default="dpm")
parser.add_argument("--calc", action="store_true")
return parser.parse_args()
def armor_percent_to_float(num: float):
return (100 - num) / 100
def armor_float_to_percent(num: float):
return 100 - num * 100
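# armor_ignore combines multiplicatively on the remaining-defense fraction (100 - x)/100; the two helpers above convert between the percent and fraction forms for the ratios taken in optimization_hint().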
def get_modifier(args) -> core.CharacterModifier:
preset = get_preset(args.id)
template = get_template_generator("high_standard")().get_template(args.ulevel)
target: ItemedCharacter = template(weaponList[preset.job], args.cdr)
gen: JobGenerator = jobMap[preset.job].JobGenerator()
v_builder = core.AlwaysMaximumVBuilder()
graph = gen.package(
target,
v_builder,
options=preset.options,
ulevel=args.ulevel,
weaponstat=[4, 9],
ability_grade=Ability_grade(4, 1),
)
return graph.get_default_buff_modifier()
def optimization_hint(args, df: pd.DataFrame):
buff_modifier = get_modifier(args)
df = df[["name", "deal", "mdf"]]
df = df.loc[df["deal"] > 0]
deal_total = df["deal"].sum()
df["crit_damage"] = df["mdf"].apply(lambda x: x["crit_damage"])
df["pdamage"] = df["mdf"].apply(lambda x: x["pdamage"])
df["boss_pdamage"] = df["mdf"].apply(lambda x: x["boss_pdamage"])
df["armor_ignore"] = df["mdf"].apply(lambda x: x["armor_ignore"])
df["patt"] = df["mdf"].apply(lambda x: x["patt"])
grouped = df.groupby(["name"])
df = pd.DataFrame()
df["share"] = grouped["deal"].sum() / deal_total
df["crit_damage"] = grouped["crit_damage"].mean()
df["pdamage"] = grouped["pdamage"].mean()
df["boss_pdamage"] = grouped["boss_pdamage"].mean()
df["armor_ignore"] = grouped["armor_ignore"].mean()
df["patt"] = grouped["patt"].mean()
print(df)
crit_damage = (df["crit_damage"] * df["share"]).sum()
pdamage = (df["pdamage"] * df["share"]).sum()
boss_pdamage = (df["boss_pdamage"] * df["share"]).sum()
armor_ignore = (df["armor_ignore"] * df["share"]).sum()
patt = (df["patt"] * df["share"]).sum()
print(
{
"crit_damage": crit_damage - buff_modifier.crit_damage,
"pdamage": pdamage - buff_modifier.pdamage,
"boss_pdamage": boss_pdamage - buff_modifier.boss_pdamage,
"armor_ignore": armor_float_to_percent(
armor_percent_to_float(armor_ignore)
/ armor_percent_to_float(buff_modifier.armor_ignore)
/ armor_percent_to_float(20)
),
"patt": patt - buff_modifier.patt,
}
)
if __name__ == "__main__":
args = get_args()
if args.calc:
data = save_data(args)
else:
data = load_data(args)
optimization_hint(args, data)
```
#### File: Jeongwoo-KGI/maplestory_dpm_calc/test.py
```python
from dpmModule.character.characterTemplate import get_template_generator
from dpmModule.util.dpmgenerator import IndividualDPMGenerator
from dpmModule.util.configurations import export_configuration
import json
import argparse
def get_args():
parser = argparse.ArgumentParser("DPM Test argument")
parser.add_argument("--job", type=str, help="Target class' Korean name to test DPM")
parser.add_argument("--level", type=int, default=None, help="Character's level, default depends on ulevel")
parser.add_argument("--ulevel", type=int, default=6000, help="Union level, default is 6000")
parser.add_argument("--time", type=int, default=1800, help="Test time in seconds, default is 1800(30 min)")
parser.add_argument("--cdr", type=int, default=0, help="Cooltime reduce (hat potential) in seconds, default is 0")
parser.add_argument("--log", action="store_true", help="print the log of the test")
parser.add_argument("--stat", action="store_true", help="print the statistics of the test")
parser.add_argument("--task", default="dpm")
return parser.parse_args()
def test():
args = get_args()
if args.task == "dpm":
dpm(args)
elif args.task == "conf":
conf(args)
def conf(args):
job_real = args.job[:].replace("-", "/")
configuration = export_configuration(job_real)
regularized_configuration = {}
for k_name, v in configuration.items():
new_v = v
if new_v['cooltime'] == 99999999:
new_v['cooltime'] = -1
if 'static_character_modifier' in new_v:
for modifier_k, modifier_v in new_v.items():
if modifier_v != 0:
new_v[modifier_k] = modifier_v
new_v.pop('static_character_modifier')
if 'static_character_modifier' in new_v:
new_v['type'] = 'BuffSkill'
elif 'summondelay' in new_v:
new_v['type'] = 'SummonSkill'
else:
new_v['type'] = 'DamageSkill'
new_v.pop('explanation')
if '_static_skill_modifier' in new_v:
if '0' in new_v['_static_skill_modifier']:
for k in new_v['_static_skill_modifier']:
pops = [k1 for k1, v in new_v['_static_skill_modifier'][k].items() if v == 0]
for k2 in pops:
new_v['_static_skill_modifier'][k].pop(k2)
else:
pops = [k1 for k1, v in new_v['_static_skill_modifier'].items() if v == 0]
for k2 in pops:
new_v['_static_skill_modifier'].pop(k2)
regularized_configuration[k_name] = v
with open(f"{args.job}.conf.json", "w", encoding="utf8") as f:
json.dump(regularized_configuration, f, ensure_ascii=False, indent=4)
def dpm(args):
template = get_template_generator("high_standard")().get_template(args.ulevel)
parser = IndividualDPMGenerator(args.job, template)
parser.set_runtime(args.time * 1000)
try:
dpm = parser.get_dpm(
ulevel=args.ulevel,
cdr=args.cdr,
level=args.level,
weaponstat=[4, 9],
printFlag=args.log,
statistics=args.stat or args.log,
)
finally:
print(args.job, f"{dpm:,.3f}")
if __name__ == "__main__":
test()
```
|
{
"source": "JeongwookUm/TEST_AutoSpeech-master",
"score": 2
}
|
#### File: JeongwookUm/TEST_AutoSpeech-master/functions.py
```python
import time
import torch
import torch.nn.functional as F
import logging
import numpy as np
import matplotlib.pyplot as plt
from utils import compute_eer
from utils import AverageMeter, ProgressMeter, accuracy
plt.switch_backend('agg')
logger = logging.getLogger(__name__)
def train(cfg, model, optimizer, train_loader, val_loader, criterion, architect, epoch, writer_dict, lr_scheduler=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
alpha_entropies = AverageMeter('Entropy', ':.4e')
progress = ProgressMeter(
len(train_loader), batch_time, data_time, losses, top1, top5, alpha_entropies,
prefix="Epoch: [{}]".format(epoch), logger=logger)
writer = writer_dict['writer']
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
global_steps = writer_dict['train_global_steps']
if lr_scheduler:
current_lr = lr_scheduler.set_lr(optimizer, global_steps, epoch)
else:
current_lr = cfg.TRAIN.LR
# measure data loading time
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
input_search, target_search = next(iter(val_loader))
input_search = input_search.cuda(non_blocking=True)
target_search = target_search.cuda(non_blocking=True)
# step architecture
architect.step(input_search, target_search)
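# The architect updates the architecture parameters on a held-out validation batch (DARTS-style bilevel search) before the weight update below.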
alpha_entropy = architect.model.compute_arch_entropy()
alpha_entropies.update(alpha_entropy.mean(), input.size(0))
# compute output
output = model(input)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
loss = criterion(output, target)
losses.update(loss.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# write to logger
writer.add_scalar('lr', current_lr, global_steps)
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('arch_entropy', alpha_entropies.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
# log acc for cross entropy loss
writer.add_scalar('train_acc1', top1.val, global_steps)
writer.add_scalar('train_acc5', top5.val, global_steps)
if i % cfg.PRINT_FREQ == 0:
progress.print(i)
def train_from_scratch(cfg, model, optimizer, train_loader, criterion, epoch, writer_dict, lr_scheduler=None):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader), batch_time, data_time, losses, top1, top5, prefix="Epoch: [{}]".format(epoch), logger=logger)
writer = writer_dict['writer']
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
global_steps = writer_dict['train_global_steps']
if lr_scheduler:
current_lr = lr_scheduler.get_lr()
else:
current_lr = cfg.TRAIN.LR
# measure data loading time
data_time.update(time.time() - end)
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
# measure accuracy and record loss
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
losses.update(loss.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# write to logger
writer.add_scalar('lr', current_lr, global_steps)
writer.add_scalar('train_loss', losses.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
# log acc for cross entropy loss
writer.add_scalar('train_acc1', top1.val, global_steps)
writer.add_scalar('train_acc5', top5.val, global_steps)
if i % cfg.PRINT_FREQ == 0:
progress.print(i)
def validate_verification(cfg, model, test_loader):
batch_time = AverageMeter('Time', ':6.3f')
progress = ProgressMeter(
len(test_loader), batch_time, prefix='Test: ', logger=logger)
# switch to evaluate mode
model.eval()
labels, distances = [], []
with torch.no_grad():
end = time.time()
for i, (input1, input2, label) in enumerate(test_loader):
input1 = input1.cuda(non_blocking=True).squeeze(0)
input2 = input2.cuda(non_blocking=True).squeeze(0)
label = label.cuda(non_blocking=True)
# compute output
outputs1 = model(input1).mean(dim=0).unsqueeze(0)
outputs2 = model(input2).mean(dim=0).unsqueeze(0)
dists = F.cosine_similarity(outputs1, outputs2)
dists = dists.data.cpu().numpy()
distances.append(dists)
labels.append(label.data.cpu().numpy())
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 2000 == 0:
progress.print(i)
labels = np.array([sublabel for label in labels for sublabel in label])
distances = np.array([subdist for dist in distances for subdist in dist])
eer = compute_eer(distances, labels)
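# EER is the operating point where the false-acceptance and false-rejection rates are equal; lower is better.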
logger.info('Test EER: {:.8f}'.format(np.mean(eer)))
return eer
def validate_identification(cfg, model, test_loader, criterion):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(test_loader), batch_time, losses, top1, top5, prefix='Test: ', logger=logger)
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(test_loader):
input = input.cuda(non_blocking=True).squeeze(0)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
output = torch.mean(output, dim=0, keepdim=True)
output = model.forward_classifier(output)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
top1.update(acc1[0], input.size(0))
top5.update(acc5[0], input.size(0))
loss = criterion(output, target)
losses.update(loss.item(), 1)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 2000 == 0:
progress.print(i)
logger.info('Test Acc@1: {:.8f} Acc@5: {:.8f}'.format(top1.avg, top5.avg))
return top1.avg
```
|
{
"source": "jeongyoonlee/audiomentations",
"score": 3
}
|
#### File: audiomentations/core/utils.py
```python
import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
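# Example: with clean_rms=0.5 and snr=20 dB, the desired noise RMS is 0.5 / 10**(20/20) = 0.05, i.e. ten times quieter than the clean signal.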
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
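# e.g. +6 dB corresponds to roughly a 2.0x amplitude ratio and -6 dB to roughly 0.5x.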
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
:param spectrogram: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16)
```
#### File: audiomentations/tests/test_clipping_distortion.py
```python
import unittest
import numpy as np
from audiomentations.augmentations.transforms import ClippingDistortion
from audiomentations.core.composition import Compose
class TestClippingDistortion(unittest.TestCase):
def test_distort(self):
sample_len = 1024
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[
ClippingDistortion(
min_percentile_threshold=20, max_percentile_threshold=40, p=1.0
)
]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertEqual(len(samples_out), sample_len)
self.assertLess(sum(abs(samples_out)), sum(abs(samples_in)))
def test_distort_multichannel(self):
sample_len = 32000
samples_in = np.random.normal(0, 1, size=(2, sample_len)).astype(np.float32)
sample_rate = 16000
augmenter = ClippingDistortion(
min_percentile_threshold=20, max_percentile_threshold=40, p=1.0
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertEqual(samples_out.shape, samples_in.shape)
self.assertLess(np.sum(np.abs(samples_out)), np.sum(np.abs(samples_in)))
self.assertAlmostEqual(np.amax(samples_out[0, :]), np.amax(samples_out[1, :]))
```
#### File: audiomentations/tests/test_frequency_mask.py
```python
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from audiomentations.augmentations.transforms import FrequencyMask
from audiomentations.core.composition import Compose
class TestFrequencyMask(unittest.TestCase):
def test_apply_frequency_mask(self):
sample_len = 1024
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
samples_in_copy = np.copy(samples_in)
sample_rate = 16000
augmenter = Compose(
[FrequencyMask(min_frequency_band=0.3, max_frequency_band=0.5, p=1.0)]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertEqual(len(samples_out), sample_len)
# Check that the input is left untouched
assert_array_equal(samples_in, samples_in_copy)
std_in = np.mean(np.abs(samples_in))
std_out = np.mean(np.abs(samples_out))
self.assertLess(std_out, std_in)
def test_apply_frequency_mask_stereo(self):
np.random.seed(42)
sample_len = 1024
samples_in = np.random.uniform(low=-0.5, high=0.5, size=(2, sample_len)).astype(
np.float32
)
samples_in_copy = np.copy(samples_in)
sample_rate = 16000
augmenter = FrequencyMask(min_frequency_band=0.3, max_frequency_band=0.3, p=1.0)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertEqual(samples_out.shape, samples_in.shape)
# Check that the input is left untouched
assert_array_equal(samples_in, samples_in_copy)
std_in = np.mean(np.abs(samples_in))
std_out = np.mean(np.abs(samples_out))
self.assertLess(std_out, std_in)
augmenter.freeze_parameters()
samples_only_1st_channel = augmenter(
samples=samples_in[0, :], sample_rate=sample_rate
)
assert_array_equal(samples_out[0, :], samples_only_1st_channel)
def test_filter_instability(self):
"""
An early implementation of FrequencyMask had a problem with filter instability
sometimes. That would lead to NaN values in the result. This test checks whether or not
the problem currently exists.
"""
np.random.seed(42)
sample_len = 32000
samples_in = np.random.uniform(-1, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose([FrequencyMask(p=1.0)])
augmenter.transforms[0].randomize_parameters(samples_in, sample_rate)
augmenter.transforms[0].parameters["bandwidth"] = 600
augmenter.transforms[0].parameters["freq_start"] = 17
augmenter.freeze_parameters()
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertFalse(np.isnan(samples_out).any())
```
#### File: audiomentations/tests/test_gaussian_noise.py
```python
import unittest
import numpy as np
from audiomentations.augmentations.transforms import AddGaussianNoise
from audiomentations.core.composition import Compose
class TestGaussianNoise(unittest.TestCase):
def test_gaussian_noise(self):
samples = np.zeros((20,), dtype=np.float32)
sample_rate = 16000
augmenter = Compose([AddGaussianNoise(p=1.0)])
samples = augmenter(samples=samples, sample_rate=sample_rate)
self.assertEqual(samples.dtype, np.float32)
self.assertNotAlmostEqual(float(np.sum(np.abs(samples))), 0.0)
def test_gaussian_noise_stereo(self):
samples = np.zeros((2, 2000), dtype=np.float32)
sample_rate = 16000
augmenter = Compose([AddGaussianNoise(p=1.0)])
samples = augmenter(samples=samples, sample_rate=sample_rate)
self.assertEqual(samples.dtype, np.float32)
self.assertNotAlmostEqual(float(np.sum(np.abs(samples))), 0.0)
```
#### File: audiomentations/tests/test_loudness_normalization.py
```python
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from audiomentations import LoudnessNormalization
class TestLoudnessNormalization(unittest.TestCase):
def test_loudness_normalization(self):
samples = np.random.uniform(low=-0.2, high=-0.001, size=(8000,)).astype(
np.float32
)
sample_rate = 16000
augment = LoudnessNormalization(min_lufs_in_db=-32, max_lufs_in_db=-12, p=1.0)
processed_samples = augment(samples=samples, sample_rate=sample_rate)
gain_factors = processed_samples / samples
self.assertAlmostEqual(np.amin(gain_factors), np.amax(gain_factors), places=6)
self.assertEqual(processed_samples.dtype, np.float32)
def test_loudness_normalization_digital_silence(self):
samples = np.zeros(8000, dtype=np.float32)
sample_rate = 16000
augment = LoudnessNormalization(min_lufs_in_db=-32, max_lufs_in_db=-12, p=1.0)
processed_samples = augment(samples=samples, sample_rate=sample_rate)
assert_almost_equal(processed_samples, np.zeros(8000, dtype=np.float32))
self.assertEqual(processed_samples.dtype, np.float32)
def test_loudness_normalization_too_short_input(self):
samples = np.random.uniform(low=-0.2, high=-0.001, size=(800,)).astype(
np.float32
)
sample_rate = 16000
augment = LoudnessNormalization(min_lufs_in_db=-32, max_lufs_in_db=-12, p=1.0)
with self.assertRaises(ValueError):
_ = augment(samples=samples, sample_rate=sample_rate)
def test_loudness_normalization_multichannel(self):
samples = np.random.uniform(low=-0.2, high=-0.001, size=(3, 8000)).astype(
np.float32
)
sample_rate = 16000
augment = LoudnessNormalization(min_lufs_in_db=-32, max_lufs_in_db=-12, p=1.0)
processed_samples = augment(samples=samples, sample_rate=sample_rate)
gain_factors = processed_samples / samples
self.assertAlmostEqual(np.amin(gain_factors), np.amax(gain_factors), places=6)
self.assertEqual(processed_samples.dtype, np.float32)
```
#### File: audiomentations/tests/test_mp3_compression.py
```python
import unittest
import numpy as np
from audiomentations.augmentations.transforms import Mp3Compression
from audiomentations.core.composition import Compose
class TestMp3Compression(unittest.TestCase):
def test_apply_mp3_compression_pydub(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_lameenc(self):
sample_len = 44100
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 44100
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=48, max_bitrate=48, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_pydub(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="pydub")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_apply_mp3_compression_low_bitrate_lameenc(self):
sample_len = 16000
samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)
sample_rate = 16000
augmenter = Compose(
[Mp3Compression(p=1.0, min_bitrate=8, max_bitrate=8, backend="lameenc")]
)
samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)
self.assertEqual(samples_out.dtype, np.float32)
self.assertGreaterEqual(len(samples_out), sample_len)
self.assertLess(len(samples_out), sample_len + 2500)
def test_invalid_argument_combination(self):
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=400, max_bitrate=800)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=2, max_bitrate=4)
with self.assertRaises(AssertionError):
_ = Mp3Compression(min_bitrate=64, max_bitrate=8)
```
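The four tests above tolerate a small amount of trailing padding because an MP3 encode/decode round trip can lengthen the signal slightly (here by up to about 2500 samples). If downstream code needs the exact original length, a small helper along these lines can restore it; this is a sketch, not part of audiomentations:
```python
import numpy as np

def match_length(processed: np.ndarray, target_len: int) -> np.ndarray:
    """Trim or zero-pad a mono signal so it has exactly target_len samples."""
    if len(processed) >= target_len:
        return processed[:target_len]
    return np.pad(processed, (0, target_len - len(processed)))
```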
#### File: audiomentations/tests/test_spec_channel_shuffle.py
```python
import os
import unittest
import librosa
import numpy as np
from audiomentations import SpecChannelShuffle
from audiomentations.core.audio_loading_utils import load_sound_file
from audiomentations.core.transforms_interface import MonoAudioNotSupportedException
from demo.demo import DEMO_DIR
from .utils import plot_matrix
DEBUG = False
class TestSpecChannelShuffle(unittest.TestCase):
def test_shuffle_channels(self):
samples, sample_rate = load_sound_file(
os.path.join(DEMO_DIR, "background_noises", "hens.ogg"),
sample_rate=None,
mono=False,
)
assert samples.shape[0] == 2
magnitude_spectrogram_chn0 = librosa.feature.melspectrogram(
y=np.asfortranarray(samples[0, :]), sr=sample_rate
)
magnitude_spectrogram_chn1 = librosa.feature.melspectrogram(
y=np.asfortranarray(samples[1, :]), sr=sample_rate
)
multichannel_magnitude_spectrogram = np.zeros(
shape=(
magnitude_spectrogram_chn0.shape[0],
magnitude_spectrogram_chn0.shape[1],
3,
),
dtype=np.float32,
)
multichannel_magnitude_spectrogram[:, :, 0] = magnitude_spectrogram_chn0
multichannel_magnitude_spectrogram[:, :, 1] = magnitude_spectrogram_chn1
multichannel_magnitude_spectrogram[:, :, 2] = magnitude_spectrogram_chn1 * 0.7
if DEBUG:
image = (7 + np.log10(multichannel_magnitude_spectrogram + 0.0000001)) / 8
plot_matrix(image, title="before")
# Make sure the shuffled channels do not equal the original order
transform = SpecChannelShuffle(p=1.0)
for _ in range(100000):
transform.randomize_parameters(multichannel_magnitude_spectrogram)
if transform.parameters["shuffled_channel_indexes"] != [0, 1, 2]:
break
transform.freeze_parameters()
augmented_spectrogram = transform(multichannel_magnitude_spectrogram)
if DEBUG:
image = (7 + np.log10(augmented_spectrogram + 0.0000001)) / 8
plot_matrix(image, title="after")
with np.testing.assert_raises(AssertionError):
np.testing.assert_array_equal(
augmented_spectrogram, multichannel_magnitude_spectrogram
)
for augmented_index, original_index in enumerate(
transform.parameters.get("shuffled_channel_indexes")
):
np.testing.assert_array_equal(
augmented_spectrogram[:, :, augmented_index],
multichannel_magnitude_spectrogram[:, :, original_index],
)
def test_shuffle_channels_mono(self):
samples, sample_rate = load_sound_file(
os.path.join(DEMO_DIR, "acoustic_guitar_0.wav"), sample_rate=None
)
magnitude_spectrogram = librosa.feature.melspectrogram(
y=samples, sr=sample_rate
)
transform = SpecChannelShuffle(p=1.0)
with self.assertRaises(MonoAudioNotSupportedException):
augmented_spectrogram = transform(magnitude_spectrogram)
def test_empty_spectrogram(self):
spec = np.zeros(shape=(0, 0), dtype=np.float32)
transform = SpecChannelShuffle(p=1.0)
augmented_spectrogram = transform(spec)
np.testing.assert_array_equal(spec, augmented_spectrogram)
```
#### File: audiomentations/tests/test_utils.py
```python
import unittest
from audiomentations.core.utils import (
calculate_desired_noise_rms,
convert_decibels_to_amplitude_ratio,
get_file_paths,
)
from demo.demo import DEMO_DIR
class TestUtils(unittest.TestCase):
def test_calculate_desired_noise_rms(self):
noise_rms = calculate_desired_noise_rms(clean_rms=0.5, snr=6)
self.assertAlmostEqual(noise_rms, 0.2505936168136362)
def test_convert_decibels_to_amplitude_ratio(self):
amplitude_ratio = convert_decibels_to_amplitude_ratio(decibels=-6)
self.assertAlmostEqual(amplitude_ratio, 0.5011872336272722)
amplitude_ratio = convert_decibels_to_amplitude_ratio(decibels=6)
self.assertAlmostEqual(amplitude_ratio, 1.9952623149688795)
def test_get_file_paths_uppercase_extension(self):
file_paths = get_file_paths(DEMO_DIR, traverse_subdirectories=False)
found_it = False
for file_path in file_paths:
if file_path.name == "stereo_24bit.WAV":
found_it = True
break
self.assertTrue(found_it)
```
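The expected values in these tests follow directly from the decibel definitions used by the utilities. A minimal sketch (not the library's actual implementation) that reproduces the numbers:
```python
def desired_noise_rms(clean_rms: float, snr_db: float) -> float:
    # SNR(dB) = 20 * log10(clean_rms / noise_rms)  =>  noise_rms = clean_rms / 10 ** (snr_db / 20)
    return clean_rms / (10 ** (snr_db / 20))

def db_to_amplitude_ratio(db: float) -> float:
    return 10 ** (db / 20)

print(desired_noise_rms(0.5, 6))      # ~0.25059, matches the test value
print(db_to_amplitude_ratio(-6))      # ~0.50119
print(db_to_amplitude_ratio(6))       # ~1.99526
```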
|
{
"source": "jeongyoonlee/kddcup2019track1",
"score": 3
}
|
#### File: kddcup2019track1/src/build_features.py
```python
import json
import pandas as pd
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
def read_profile_data():
profile_data = pd.read_csv('../input/profiles.csv')
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_na.columns = profile_data.columns
profile_data = profile_data.append(profile_na)
return profile_data
def merge_raw_data():
tr_queries = pd.read_csv('../input/train_queries.csv')
te_queries = pd.read_csv('../input/test_queries.csv')
tr_plans = pd.read_csv('../input/train_plans.csv')
te_plans = pd.read_csv('../input/test_plans.csv')
tr_click = pd.read_csv('../input/train_clicks.csv')
tr_data = tr_queries.merge(tr_click, on='sid', how='left')
tr_data = tr_data.merge(tr_plans, on='sid', how='left')
tr_data = tr_data.drop(['click_time'], axis=1)
tr_data['click_mode'] = tr_data['click_mode'].fillna(0)
te_data = te_queries.merge(te_plans, on='sid', how='left')
te_data['click_mode'] = -1
data = pd.concat([tr_data, te_data], axis=0)
data = data.drop(['plan_time'], axis=1)
data = data.reset_index(drop=True)
print('total data size: {}'.format(data.shape))
print('raw data columns: {}'.format(', '.join(data.columns)))
return data
def gen_od_feas(data):
enc = LabelEncoder()
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
#data['o_enc'] = enc.fit_transform(data['o'])
#data['d_enc'] = enc.fit_transform(data['d'])
data = data.drop(['o', 'd'], axis=1)
return data
def gen_plan_feas(data):
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
print('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
if row['hour'] > 7 and row['hour'] < 18: # 7:00 - 18:00
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
else: # 6:00 - 7:00
h = 2
return str(w) + '_' + str(h)
def gen_time_feas(data):
data['req_time'] = pd.to_datetime(data['req_time'])
data['weekday'] = data['req_time'].dt.dayofweek
data['hour'] = data['req_time'].dt.hour
#data['group-weekday-n-hour'] = LabelEncoder().fit_transform(data.apply(group_weekday_and_hour, axis = 1))
data = data.drop(['req_time'], axis=1)
return data
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv('../feat/pid_feat.csv')
data = data.merge(feat, how='left', on='pid')
return data
def gen_sid_feat(data):
feat = pd.read_csv('../feat/sid_feat.csv')
data = data.merge(feat, how='left', on='sid')
data['first_mode-eq-min_dist_mode'] = (data['first_mode']==data['min_dist_mode']).astype(int)
data['first_mode-eq-min_eta_mode'] = (data['first_mode']==data['min_eta_mode']).astype(int)
data['first_mode-eq-min_price_mode'] = (data['first_mode']==data['min_price_mode']).astype(int)
return data
def gen_od_feat(data):
feat = pd.read_csv('../feat/od_feat.csv')
tr_sid = pd.read_csv('../input/train_queries.csv', usecols=['sid','o','d'])
te_sid = pd.read_csv('../input/test_queries.csv', usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
print(sid.shape)
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
print(feat.shape)
print(feat.columns)
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv('../feat/od_node_cluster.csv')
tr_sid = pd.read_csv('../input/train_queries.csv', usecols=['sid','o','d'])
te_sid = pd.read_csv('../input/test_queries.csv', usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
return data
def gen_encode_feas(data):
return data
def split_train_test(data):
train_data = data[data['click_mode'] != -1]
test_data = data[data['click_mode'] == -1]
submit = test_data[['sid']].copy()
train_data = train_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['sid', 'pid'], axis=1)
test_data = test_data.drop(['click_mode'], axis=1)
train_y = train_data['click_mode'].values
train_x = train_data.drop(['click_mode'], axis=1)
return train_x, train_y, test_data, submit
def get_train_test_feas_data():
data = merge_raw_data()
data = gen_od_feas(data)
data = gen_plan_feas(data)
data = gen_profile_feas(data)
data = gen_time_feas(data) # 0.6758
data = gen_ratio_feas(data)
data = gen_fly_dist_feas(data)
data = gen_aggregate_profile_feas(data) # 0.6759966661470926
data = gen_pid_feat(data) # 0.6762996872664375
#data = gen_sid_feat(data) # 0.6752915844109314 (not work)
data = gen_od_feat(data) # without click count: 0.6780576865566392; with click count: 0.6795810670221226
data = gen_od_cluster_feat(data) # 0.6796523605372234
#data = gen_od_eq_feat(data)
train_x, train_y, test_x, submit = split_train_test(data)
return train_x, train_y, test_x, submit
if __name__ == '__main__':
pass
```
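gen_plan_feas encodes the recommended transport modes of each session as a tiny document ('word_3 word_5 ...') and embeds it with TF-IDF followed by truncated SVD. A toy, self-contained illustration of that step (the real pipeline uses 10 components over the full dataset):
```python
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer

mode_texts = ['word_1 word_3 word_7', 'word_2 word_3', 'word_null']
tfidf_vec = TfidfVectorizer(ngram_range=(1, 2)).fit_transform(mode_texts)
svd = TruncatedSVD(n_components=2, random_state=2019)
mode_svd = pd.DataFrame(svd.fit_transform(tfidf_vec),
                        columns=['svd_mode_0', 'svd_mode_1'])
print(mode_svd.shape)  # (3, 2) -- one dense embedding row per session
```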
#### File: kddcup2019track1/src/utils.py
```python
import os
from time import gmtime, strftime
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
import config  # project-level config module (n_class, file paths, model/feature names) -- assumed to exist in this repo
def eval_f(y_pred, train_data):
y_true = train_data.label
y_pred = y_pred.reshape((config.n_class, -1)).T
y_pred = np.argmax(y_pred, axis=1)
score = f1_score(y_true, y_pred, average='weighted')
return 'weighted-f1-score', score, True
def submit_result(submit, result, trn_result, score):
now_time = strftime("%Y-%m-%d-%H-%M-%S", gmtime())
submit['recommend_mode'] = result
submit.to_csv(config.submission_file, index=False)
if trn_result is not None:
submit['recommend_mode'] = trn_result
submit.to_csv(config.trn_submission_file, index=False)
if os.path.exists(config.metric_file):
metric = pd.read_csv(config.metric_file)
        metric = metric.append({'model': config.model_name,
                                'feature': config.feature_name,
                                'datetime': now_time,
                                'score': score}, ignore_index=True)
else:
metric = pd.DataFrame({'model': [config.model_name],
'feature': config.feature_name,
'datetime': [now_time],
'score': [score]})
metric.round(6).to_csv(config.metric_file, index=False)
```
|
{
"source": "jeongyoonlee/kddcup2019track2",
"score": 3
}
|
#### File: starting_kit_0401/sample_code_submission/preprocess.py
```python
import datetime
import CONSTANT
from util import log, timeit
import numpy as np
from pandas.api.types import is_categorical_dtype
@timeit
def clean_tables(tables):
for tname in tables:
log(f"cleaning table {tname}")
clean_df(tables[tname])
@timeit
def clean_df(df):
log('memory usage: {:.2f}MB'.format(df.memory_usage().sum() // 1e6))
fillna(df)
cols_to_drop = []
for col in df.columns:
s = df[col]
        if s.dtype == object:  # np.object is deprecated in recent NumPy; plain object covers string columns
s = s.astype('category')
if len(s.cat.categories) == 1:
cols_to_drop.append(col)
elif is_categorical_dtype(s):
if len(s.cat.categories) == 1:
cols_to_drop.append(col)
elif s.min() == s.max():
cols_to_drop.append(col)
log('dropping constant features')
log('{}'.format(cols_to_drop))
df.drop(cols_to_drop, axis=1, inplace=True)
log('memory usage: {:.2f}MB'.format(df.memory_usage().sum() // 1e6))
@timeit
def fillna(df):
for c in [c for c in df if c.startswith(CONSTANT.NUMERICAL_PREFIX)]:
df[c].fillna(-1, inplace=True)
for c in [c for c in df if c.startswith(CONSTANT.CATEGORY_PREFIX)]:
df[c].fillna("0", inplace=True)
for c in [c for c in df if c.startswith(CONSTANT.TIME_PREFIX)]:
df[c].fillna(datetime.datetime(1970, 1, 1), inplace=True)
for c in [c for c in df if c.startswith(CONSTANT.MULTI_CAT_PREFIX)]:
df[c].fillna("0", inplace=True)
@timeit
def feature_engineer(df, config):
transform_categorical_hash(df)
transform_datetime(df, config)
@timeit
def transform_datetime(df, config):
for c in [c for c in df if c.startswith(CONSTANT.TIME_PREFIX)]:
df.drop(c, axis=1, inplace=True)
@timeit
def transform_categorical_hash(df):
for c in [c for c in df if c.startswith(CONSTANT.CATEGORY_PREFIX)]:
df[c] = df[c].apply(lambda x: int(x))
for c in [c for c in df if c.startswith(CONSTANT.MULTI_CAT_PREFIX)]:
df[c] = df[c].apply(lambda x: int(x.split(',')[0]))
```
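clean_df drops columns that carry no information: categorical columns with a single category and numerical columns whose min equals their max. A simplified, prefix-free sketch of the same rule:
```python
import pandas as pd

df = pd.DataFrame({'a': [1, 1, 1], 'b': ['x', 'x', 'x'], 'c': [1, 2, 3]})
cols_to_drop = [col for col in df.columns if df[col].nunique(dropna=False) <= 1]
print(cols_to_drop)  # ['a', 'b'] -- constant columns are removed, 'c' is kept
```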
#### File: starting_kit_0401/scoring_program/score.py
```python
import os
from os.path import join, isfile
import sys
import pandas as pd
from sklearn import metrics
from datetime import datetime
def mprint(msg):
"""info"""
cur_time = datetime.now().strftime('%m-%d %H:%M:%S')
print(f"INFO [{cur_time}] {msg}")
def init_dirs():
if len(sys.argv) == 1:
# default local
root_dir = os.getcwd()
dirs = {
'ref': join(root_dir, 'sample_ref'),
'output': join(root_dir, 'sample_scoring_output'),
'prediction': join(root_dir, 'sample_predictions')
}
elif len(sys.argv) == 3:
# codalab
dirs = {
'ref': join(sys.argv[1], 'ref'),
'output': sys.argv[2],
'prediction': join(sys.argv[1], 'res')
}
elif len(sys.argv) == 5 and sys.argv[1] == 'local':
# full call in local
dirs = {
'prediction': join(sys.argv[2]),
'ref': join(sys.argv[3]),
'output': sys.argv[4]
}
else:
raise ValueError("Wrong number of arguments")
os.makedirs(dirs['output'], exist_ok=True)
return dirs
def write_score(dirs, score_file):
datanames = sorted(os.listdir(dirs['ref']))
mprint(f'Datanames: {datanames}')
total_score = 0
for idx, dataname in enumerate(datanames):
auc = get_auc(dirs, dataname)
total_score += auc
score_file.write(f'set{idx+1}_score: {auc}\n')
def get_auc(dirs, dataname):
predict_file = join(dirs['prediction'], f'{dataname}.predict')
if not isfile(predict_file):
mprint(f"{dataname}.predict does not exist")
auc = 0
else:
prediction = pd.read_csv(predict_file)
solution = pd.read_csv(
join(dirs['ref'], dataname, 'main_test.solution'))
try:
auc = metrics.roc_auc_score(solution, prediction)
        except Exception:
            mprint(f"{dataname}: cannot calculate AUC")
auc = 0
mprint(f'{dataname} AUC: {auc}')
return auc
def write_duration(dirs, score_file):
with open(join(dirs['prediction'], 'duration.txt')) as time_f:
time = time_f.read()
score_file.write(f'Duration: {time}\n')
def main():
dirs = init_dirs()
with open(join(dirs['output'], 'scores.txt'), 'w') as score_file:
write_score(dirs, score_file)
write_duration(dirs, score_file)
if __name__ == '__main__':
main()
```
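get_auc ultimately delegates to scikit-learn's roc_auc_score on the solution and prediction columns; a quick standalone check of that call:
```python
from sklearn import metrics

solution = [0, 0, 1, 1]
prediction = [0.1, 0.4, 0.35, 0.8]
print(metrics.roc_auc_score(solution, prediction))  # 0.75
```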
|
{
"source": "Jeonhappywife/MyKiwoom",
"score": 2
}
|
#### File: MyKiwoom/trader/strategy.py
```python
import os
import sys
import psutil
import numpy as np
import pandas as pd
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import ui_num, DICT_SET, columns_gj
from utility.static import now, timedelta_sec, thread_decorator, strf_time, float2str1p6
class Strategy:
def __init__(self, windowQ, traderQ, stgQ):
self.windowQ = windowQ
self.traderQ = traderQ
self.stgQ = stgQ
        self.list_buy = []         # list of pending buy orders
        self.list_sell = []        # list of pending sell orders
        self.int_tujagm = 0        # investment amount per stock
        self.startjjstg = False    # flag: intraday strategy initialized (after 10:00)
        self.dict_gsjm = {}        # key: stock code, value: DataFrame
        self.dict_data = {}        # key: stock code, value: list
        self.dict_high = {}        # key: stock code, value: float (highest return seen)
        self.dict_time = {
            '๊ด์ฌ์ข๋ชฉ': now(),
            '๋ถ๊ฐ์ ๋ณด': now(),
            '์ฐ์ฐ์๊ฐ': now()
        }
        self.dict_intg = {
            '์ค๋ ๋': 0,
            '์ํผ์ ': 0.,
            '๋ฉ๋ชจ๋ฆฌ': 0.
        }
self.Start()
def Start(self):
while True:
data = self.stgQ.get()
if type(data) == int:
self.int_tujagm = data
elif type(data) == list:
if len(data) == 2:
self.UpdateList(data[0], data[1])
elif len(data) == 38:
self.BuyStrategy(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8],
data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16],
data[17], data[18], data[19], data[20], data[21], data[22], data[23], data[24],
data[25], data[26], data[27], data[28], data[29], data[30], data[31], data[32],
data[33], data[34], data[35], data[36], data[37])
elif len(data) == 6:
self.SellStrategy(data[0], data[1], data[2], data[3], data[4], data[5])
            elif data == '์ ๋ตํ๋ก์ธ์ค์ข๋ฃ':
                break
            if now() > self.dict_time['๊ด์ฌ์ข๋ชฉ']:
                self.windowQ.put([ui_num['๊ด์ฌ์ข๋ชฉ'], self.dict_gsjm])
                self.dict_time['๊ด์ฌ์ข๋ชฉ'] = timedelta_sec(1)
            if now() > self.dict_time['๋ถ๊ฐ์ ๋ณด']:
                self.UpdateInfo()
                self.dict_time['๋ถ๊ฐ์ ๋ณด'] = timedelta_sec(2)
        self.windowQ.put([1, '์์คํ๋ช๋ น ์คํ ์๋ฆผ - ์ ๋ต ์ฐ์ฐ ํ๋ก์ธ์ค ์ข๋ฃ'])
def UpdateList(self, gubun, code):
        if '์กฐ๊ฑด์ง์' in gubun:
if code not in self.dict_gsjm.keys():
if int(strf_time('%H%M%S')) < 100000:
data = np.zeros((DICT_SET['์ฅ์ดํ๊ท ๊ฐ๊ณ์ฐํฑ์'] + 2, len(columns_gj))).tolist()
else:
data = np.zeros((DICT_SET['์ฅ์คํ๊ท ๊ฐ๊ณ์ฐํฑ์'] + 2, len(columns_gj))).tolist()
df = pd.DataFrame(data, columns=columns_gj)
self.dict_gsjm[code] = df.copy()
elif gubun == '์กฐ๊ฑด์ดํ':
if code in self.dict_gsjm.keys():
del self.dict_gsjm[code]
elif gubun in ['๋งค์์๋ฃ', '๋งค์์ทจ์']:
if code in self.list_buy:
self.list_buy.remove(code)
elif gubun in ['๋งค๋์๋ฃ', '๋งค๋์ทจ์']:
if code in self.list_sell:
self.list_sell.remove(code)
if code in self.dict_high.keys():
del self.dict_high[code]
def BuyStrategy(self, ํ์ฌ๊ฐ, ์๊ฐ, ๊ณ ๊ฐ, ์ ๊ฐ, ๋ฑ๋ฝ์จ, ๋น์ผ๊ฑฐ๋๋๊ธ, ์ฒด๊ฒฐ๊ฐ๋,
์ด๋น๋งค์์๋, ์ด๋น๋งค๋์๋, VIํด์ ์๊ฐ, VI์๋5ํธ๊ฐ, ๋งค๋์ด์๋, ๋งค์์ด์๋,
๋งค๋ํธ๊ฐ5, ๋งค๋ํธ๊ฐ4, ๋งค๋ํธ๊ฐ3, ๋งค๋ํธ๊ฐ2, ๋งค๋ํธ๊ฐ1, ๋งค์ํธ๊ฐ1, ๋งค์ํธ๊ฐ2, ๋งค์ํธ๊ฐ3, ๋งค์ํธ๊ฐ4, ๋งค์ํธ๊ฐ5,
๋งค๋์๋5, ๋งค๋์๋4, ๋งค๋์๋3, ๋งค๋์๋2, ๋งค๋์๋1, ๋งค์์๋1, ๋งค์์๋2, ๋งค์์๋3, ๋งค์์๋4, ๋งค์์๋5,
                    ์ข๋ชฉ์ฝ๋, ์ฒด๊ฒฐ์๊ฐ, ํฑ์์ ์๊ฐ, ์ข๋ชฉ๋ช, ์๊ณ ์ข๋ชฉ):
        if ์ข๋ชฉ์ฝ๋ not in self.dict_gsjm.keys():
            return
self.CheckStrategy()
๊ณ ์ ํ๊ท = round((๊ณ ๊ฐ + ์ ๊ฐ) / 2)
๊ณ ์ ํ๊ท ๋๋น๋ฑ๋ฝ์จ = round((ํ์ฌ๊ฐ / ๊ณ ์ ํ๊ท - 1) * 100, 2)
์ง์ ๋น์ผ๊ฑฐ๋๋๊ธ = self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['๋น์ผ๊ฑฐ๋๋๊ธ'][0]
์ด๋น๊ฑฐ๋๋๊ธ = 0 if ์ง์ ๋น์ผ๊ฑฐ๋๋๊ธ == 0 else int(๋น์ผ๊ฑฐ๋๋๊ธ - ์ง์ ๋น์ผ๊ฑฐ๋๋๊ธ)
๊ตฌ๋ถ = '์ฅ์ด' if int(strf_time('%H%M%S')) < 100000 else '์ฅ์ค'
ํ๊ท ๊ฐ๊ณ์ฐํฑ์ = DICT_SET[f'{๊ตฌ๋ถ}ํ๊ท ๊ฐ๊ณ์ฐํฑ์']
ํ๊ท ๊ฐ์ธ๋ฑ์ค = ํ๊ท ๊ฐ๊ณ์ฐํฑ์ + 1
self.dict_gsjm[์ข
๋ชฉ์ฝ๋] = self.dict_gsjm[์ข
๋ชฉ์ฝ๋].shift(1)
self.dict_gsjm[์ข
๋ชฉ์ฝ๋].at[0] = ๋ฑ๋ฝ์จ, ๊ณ ์ ํ๊ท ๋๋น๋ฑ๋ฝ์จ, ์ด๋น๊ฑฐ๋๋๊ธ, ๋น์ผ๊ฑฐ๋๋๊ธ, ์ฒด๊ฒฐ๊ฐ๋, 0.
if self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['์ฒด๊ฒฐ๊ฐ๋'][ํ๊ท ๊ฐ๊ณ์ฐํฑ์] != 0.:
์ด๋น๊ฑฐ๋๋๊ธํ๊ท = int(self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['์ด๋น๊ฑฐ๋๋๊ธ'][1:ํ๊ท ๊ฐ์ธ๋ฑ์ค].mean())
์ฒด๊ฒฐ๊ฐ๋ํ๊ท = round(self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['์ฒด๊ฒฐ๊ฐ๋'][1:ํ๊ท ๊ฐ์ธ๋ฑ์ค].mean(), 2)
์ต๊ณ ์ฒด๊ฒฐ๊ฐ๋ = round(self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['์ฒด๊ฒฐ๊ฐ๋'][1:ํ๊ท ๊ฐ์ธ๋ฑ์ค].max(), 2)
self.dict_gsjm[์ข
๋ชฉ์ฝ๋].at[ํ๊ท ๊ฐ์ธ๋ฑ์ค] = 0., 0., ์ด๋น๊ฑฐ๋๋๊ธํ๊ท , 0, ์ฒด๊ฒฐ๊ฐ๋ํ๊ท , ์ต๊ณ ์ฒด๊ฒฐ๊ฐ๋
๋งค์ = True
์ง์ ์ฒด๊ฒฐ๊ฐ๋ = self.dict_gsjm[์ข
๋ชฉ์ฝ๋]['์ฒด๊ฒฐ๊ฐ๋'][1]
self.dict_data[์ข
๋ชฉ์ฝ๋] = [
ํ์ฌ๊ฐ, ์๊ฐ, ๊ณ ๊ฐ, ์ ๊ฐ, ๋ฑ๋ฝ์จ, ๊ณ ์ ํ๊ท ๋๋น๋ฑ๋ฝ์จ, ๋น์ผ๊ฑฐ๋๋๊ธ, ์ด๋น๊ฑฐ๋๋๊ธ, ์ด๋น๊ฑฐ๋๋๊ธํ๊ท , ์ฒด๊ฒฐ๊ฐ๋,
์ฒด๊ฒฐ๊ฐ๋ํ๊ท , ์ต๊ณ ์ฒด๊ฒฐ๊ฐ๋, ์ง์ ์ฒด๊ฒฐ๊ฐ๋, ์ด๋น๋งค์์๋, ์ด๋น๋งค๋์๋, VIํด์ ์๊ฐ, VI์๋5ํธ๊ฐ, ๋งค๋์ด์๋, ๋งค์์ด์๋,
๋งค๋ํธ๊ฐ5, ๋งค๋ํธ๊ฐ4, ๋งค๋ํธ๊ฐ3, ๋งค๋ํธ๊ฐ2, ๋งค๋ํธ๊ฐ1, ๋งค์ํธ๊ฐ1, ๋งค์ํธ๊ฐ2, ๋งค์ํธ๊ฐ3, ๋งค์ํธ๊ฐ4, ๋งค์ํธ๊ฐ5,
๋งค๋์๋5, ๋งค๋์๋4, ๋งค๋์๋3, ๋งค๋์๋2, ๋งค๋์๋1, ๋งค์์๋1, ๋งค์์๋2, ๋งค์์๋3, ๋งค์์๋4, ๋งค์์๋5
]
        if ์๊ณ ์ข๋ชฉ:
            return
        if ์ข๋ชฉ์ฝ๋ in self.list_buy:
            return
# ์ ๋ต ๋น๊ณต๊ฐ
if ๋งค์:
๋งค์์๋ = int(self.int_tujagm / ํ์ฌ๊ฐ)
if ๋งค์์๋ > 0:
๋จ์์๋ = ๋งค์์๋
์ง์ ๋จ์์๋ = ๋งค์์๋
๋งค์๊ธ์ก = 0
ํธ๊ฐ์ ๋ณด = {๋งค๋ํธ๊ฐ1: ๋งค๋์๋1}
for ๋งค๋ํธ๊ฐ, ๋งค๋์๋ in ํธ๊ฐ์ ๋ณด.items():
๋จ์์๋ -= ๋งค๋์๋
if ๋จ์์๋ <= 0:
๋งค์๊ธ์ก += ๋งค๋ํธ๊ฐ * ์ง์ ๋จ์์๋
break
else:
๋งค์๊ธ์ก += ๋งค๋ํธ๊ฐ * ๋งค๋์๋
์ง์ ๋จ์์๋ = ๋จ์์๋
if ๋จ์์๋ <= 0:
์์์ฒด๊ฒฐ๊ฐ = round(๋งค์๊ธ์ก / ๋งค์์๋, 2)
                    self.list_buy.append(์ข๋ชฉ์ฝ๋)
                    self.traderQ.put(['๋งค์', ์ข๋ชฉ์ฝ๋, ์ข๋ชฉ๋ช, ์์์ฒด๊ฒฐ๊ฐ, ๋งค์์๋])
if now() > self.dict_time['์ฐ์ฐ์๊ฐ']:
gap = float2str1p6((now() - ํฑ์์ ์๊ฐ).total_seconds())
            self.windowQ.put([1, f'์ ๋ต์ค ์ฐ์ฐ ์๊ฐ ์๋ฆผ - ์์ ์๊ฐ๊ณผ ์ฐ์ฐ์๊ฐ์ ์ฐจ์ด๋ [{gap}]์ด์๋๋ค.'])
self.dict_time['์ฐ์ฐ์๊ฐ'] = timedelta_sec(60)
    def SellStrategy(self, ์ข๋ชฉ์ฝ๋, ์ข๋ชฉ๋ช, ์์ต๋ฅ , ๋ณด์ ์๋, ํ์ฌ๊ฐ, ๋งค์์๊ฐ):
        if ์ข๋ชฉ์ฝ๋ not in self.dict_gsjm.keys() or ์ข๋ชฉ์ฝ๋ not in self.dict_data.keys():
            return
        if ์ข๋ชฉ์ฝ๋ in self.list_sell:
            return
๋งค๋ = False
๊ตฌ๋ถ = '์ฅ์ด' if int(strf_time('%H%M%S')) < 100000 else '์ฅ์ค'
ํ์ฌ๊ฐ, ์๊ฐ, ๊ณ ๊ฐ, ์ ๊ฐ, ๋ฑ๋ฝ์จ, ๊ณ ์ ํ๊ท ๋๋น๋ฑ๋ฝ์จ, ๋น์ผ๊ฑฐ๋๋๊ธ, ์ด๋น๊ฑฐ๋๋๊ธ, ์ด๋น๊ฑฐ๋๋๊ธํ๊ท , ์ฒด๊ฒฐ๊ฐ๋, \
์ฒด๊ฒฐ๊ฐ๋ํ๊ท , ์ต๊ณ ์ฒด๊ฒฐ๊ฐ๋, ์ง์ ์ฒด๊ฒฐ๊ฐ๋, ์ด๋น๋งค์์๋, ์ด๋น๋งค๋์๋, VIํด์ ์๊ฐ, VI์๋5ํธ๊ฐ, ๋งค๋์ด์๋, ๋งค์์ด์๋, \
๋งค๋ํธ๊ฐ5, ๋งค๋ํธ๊ฐ4, ๋งค๋ํธ๊ฐ3, ๋งค๋ํธ๊ฐ2, ๋งค๋ํธ๊ฐ1, ๋งค์ํธ๊ฐ1, ๋งค์ํธ๊ฐ2, ๋งค์ํธ๊ฐ3, ๋งค์ํธ๊ฐ4, ๋งค์ํธ๊ฐ5, \
๋งค๋์๋5, ๋งค๋์๋4, ๋งค๋์๋3, ๋งค๋์๋2, ๋งค๋์๋1, ๋งค์์๋1, ๋งค์์๋2, ๋งค์์๋3, ๋งค์์๋4, ๋งค์์๋5 = \
            self.dict_data[์ข๋ชฉ์ฝ๋]
if ์ข
๋ชฉ์ฝ๋ not in self.dict_high.keys():
self.dict_high[์ข
๋ชฉ์ฝ๋] = ์์ต๋ฅ
elif ์์ต๋ฅ > self.dict_high[์ข
๋ชฉ์ฝ๋]:
self.dict_high[์ข
๋ชฉ์ฝ๋] = ์์ต๋ฅ
์ต๊ณ ์์ต๋ฅ = self.dict_high[์ข
๋ชฉ์ฝ๋]
""" ๋งค๋ ์กฐ๊ฑด ์์ """
if ์์ต๋ฅ <= -2 or ์์ต๋ฅ >= 3:
๋งค๋ = True
# ์ ๋ต ๋น๊ณต๊ฐ
if ๋งค๋:
๋จ์์๋ = ๋ณด์ ์๋
์ง์ ๋จ์์๋ = ๋ณด์ ์๋
๋งค๋๊ธ์ก = 0
ํธ๊ฐ์ ๋ณด = {๋งค์ํธ๊ฐ1: ๋งค์์๋1, ๋งค์ํธ๊ฐ2: ๋งค์์๋2, ๋งค์ํธ๊ฐ3: ๋งค์์๋3, ๋งค์ํธ๊ฐ4: ๋งค์์๋4, ๋งค์ํธ๊ฐ5: ๋งค์์๋5}
for ๋งค์ํธ๊ฐ, ๋งค์์๋ in ํธ๊ฐ์ ๋ณด.items():
๋จ์์๋ -= ๋งค์์๋
if ๋จ์์๋ <= 0:
๋งค๋๊ธ์ก += ๋งค์ํธ๊ฐ * ์ง์ ๋จ์์๋
break
else:
๋งค๋๊ธ์ก += ๋งค์ํธ๊ฐ * ๋งค์์๋
์ง์ ๋จ์์๋ = ๋จ์์๋
if ๋จ์์๋ <= 0:
์์์ฒด๊ฒฐ๊ฐ = round(๋งค๋๊ธ์ก / ๋ณด์ ์๋, 2)
                self.list_sell.append(์ข๋ชฉ์ฝ๋)
                self.traderQ.put(['๋งค๋', ์ข๋ชฉ์ฝ๋, ์ข๋ชฉ๋ช, ์์์ฒด๊ฒฐ๊ฐ, ๋ณด์ ์๋])
def CheckStrategy(self):
if int(strf_time('%H%M%S')) >= 100000 and not self.startjjstg:
for code in list(self.dict_gsjm.keys()):
data = np.zeros((DICT_SET['์ฅ์คํ๊ท ๊ฐ๊ณ์ฐํฑ์'] + 2, len(columns_gj))).tolist()
df = pd.DataFrame(data, columns=columns_gj)
self.dict_gsjm[code] = df.copy()
self.startjjstg = True
@thread_decorator
def UpdateInfo(self):
info = [6, self.dict_intg['๋ฉ๋ชจ๋ฆฌ'], self.dict_intg['์ค๋ ๋'], self.dict_intg['์ํผ์ ']]
self.windowQ.put(info)
self.UpdateSysinfo()
def UpdateSysinfo(self):
p = psutil.Process(os.getpid())
self.dict_intg['๋ฉ๋ชจ๋ฆฌ'] = round(p.memory_info()[0] / 2 ** 20.86, 2)
self.dict_intg['์ค๋ ๋'] = p.num_threads()
self.dict_intg['์ํผ์ '] = round(p.cpu_percent(interval=2) / 2, 2)
```
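Both order branches above estimate the expected average fill price by walking the visible order-book levels (price: quantity pairs, best level first) until the order quantity is absorbed. A minimal English-named sketch of that loop, shown only as an illustration of the pattern since the original identifiers are Korean and partly garbled by text extraction:
```python
def expected_fill_price(order_qty: int, levels: dict) -> float:
    """levels maps price -> quantity, best level first (dicts keep insertion order).
    Assumes the book is deep enough to absorb order_qty."""
    remaining = order_qty
    cost = 0
    for price, qty in levels.items():
        take = min(remaining, qty)
        cost += price * take
        remaining -= take
        if remaining <= 0:
            break
    return round(cost / order_qty, 2)

print(expected_fill_price(150, {100: 50, 101: 70, 102: 200}))  # 100.87
```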
#### File: MyKiwoom/utility/query.py
```python
import sqlite3
import pandas as pd
from utility.setting import DB_STG, DB_TICK
from utility.static import now, float2str1p6
class Query:
def __init__(self, windowQ, traderQ, queryQ):
self.windowQ = windowQ
self.traderQ = traderQ
self.queryQ = queryQ
self.con1 = sqlite3.connect(DB_STG)
self.cur1 = self.con1.cursor()
self.con2 = sqlite3.connect(DB_TICK)
self.cur2 = self.con2.cursor()
self.cur2.execute('pragma journal_mode=WAL')
self.cur2.execute('pragma synchronous=normal')
self.cur2.execute('pragma temp_store=memory')
self.trigger = False
self.remove_trigger()
self.Start()
def __del__(self):
self.remove_trigger()
self.con1.close()
self.con2.close()
def Start(self):
k, j = 0, 0
df = pd.DataFrame()
while True:
query = self.queryQ.get()
if query == '๋๋นํธ๋ฆฌ๊ฑฐ์์':
self.create_trigger()
self.trigger = True
elif query[0] == 1:
if len(query) == 2:
try:
self.cur1.execute(query[1])
except Exception as e:
                        self.windowQ.put([1, f'์์คํ๋ช๋ น ์ค๋ฅ ์๋ฆผ - con1 execute {e}'])
else:
self.con1.commit()
elif len(query) == 4:
try:
query[1].to_sql(query[2], self.con1, if_exists=query[3], chunksize=1000, method='multi')
except Exception as e:
                        self.windowQ.put([1, f'์์คํ๋ช๋ น ์ค๋ฅ ์๋ฆผ - con1 to_sql {e}'])
elif query[0] == 2:
try:
if len(query) == 2:
if type(query[1]) == str:
self.con2.execute(query[1])
self.con2.commit()
else:
start = now()
k += 1
for code in list(query[1].keys()):
query[1][code]['์ข
๋ชฉ์ฝ๋'] = code
df = df.append(query[1][code])
if k == 4 and self.trigger:
df.to_sql("temp", self.con2, if_exists='append', chunksize=1000, method='multi')
self.con2.execute('insert into "dist" ("cnt") values (1);')
save_time = float2str1p6((now() - start).total_seconds())
text = f'์์คํ
๋ช
๋ น ์คํ ์๋ฆผ - ํฑ๋ฐ์ดํฐ ์ ์ฅ ์ฐ๊ธฐ์์์๊ฐ์ [{save_time}]์ด์
๋๋ค.'
self.windowQ.put([1, text])
k = 0
df = pd.DataFrame()
elif len(query) == 3:
start = now()
j += 1
last = len(list(query[1].keys()))
for i, code in enumerate(list(query[1].keys())):
text = f'์์คํ
๋ช
๋ น ์คํ ์๋ฆผ - ์์คํ
๋ช
๋ น ์คํ ์๋ฆผ - ํฑ๋ฐ์ดํฐ ์ ์ฅ ์ค ... [{j}]{i+1}/{last}'
self.windowQ.put([1, text])
query[1][code].to_sql(code, self.con2, if_exists='append', chunksize=1000, method='multi')
save_time = float2str1p6((now() - start).total_seconds())
self.windowQ.put([1, f'์์คํ
๋ช
๋ น ์คํ ์๋ฆผ - ํฑ๋ฐ์ดํฐ ์ ์ฅ ์ฐ๊ธฐ์์์๊ฐ์ [{save_time}]์ด์
๋๋ค.'])
if j == 4:
self.traderQ.put('ํฑ๋ฐ์ดํฐ์ ์ฅ์๋ฃ')
elif len(query) == 4:
query[1].to_sql(query[2], self.con2, if_exists=query[3], chunksize=1000, method='multi')
except Exception as e:
                    self.windowQ.put([1, f'์์คํ๋ช๋ น ์ค๋ฅ ์๋ฆผ - con2 to_sql {e}'])
def create_trigger(self):
res = self.cur2.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_list = []
for name in res.fetchall():
table_list.append(name[0])
const_str = '"index", ํ์ฌ๊ฐ, ์๊ฐ, ๊ณ ๊ฐ, ์ ๊ฐ, ๋ฑ๋ฝ์จ, ๋น์ผ๊ฑฐ๋๋๊ธ, ์ฒด๊ฒฐ๊ฐ๋, ์ด๋น๋งค์์๋, ์ด๋น๋งค๋์๋, VIํด์ ์๊ฐ,' \
'VI์๋5ํธ๊ฐ, ๋งค๋์ด์๋, ๋งค์์ด์๋, ๋งค๋ํธ๊ฐ5, ๋งค๋ํธ๊ฐ4, ๋งค๋ํธ๊ฐ3, ๋งค๋ํธ๊ฐ2, ๋งค๋ํธ๊ฐ1, ๋งค์ํธ๊ฐ1, ๋งค์ํธ๊ฐ2,' \
'๋งค์ํธ๊ฐ3, ๋งค์ํธ๊ฐ4, ๋งค์ํธ๊ฐ5, ๋งค๋์๋5, ๋งค๋์๋4, ๋งค๋์๋3, ๋งค๋์๋2, ๋งค๋์๋1, ๋งค์์๋1, ๋งค์์๋2,' \
'๋งค์์๋3, ๋งค์์๋4, ๋งค์์๋5'
list_stock_table = []
for table_name in table_list:
if len(table_name) == 6:
list_stock_table.append(table_name)
query_create_temp = \
'CREATE TABLE IF NOT EXISTS "temp" ("index" TEXT, "์ข
๋ชฉ์ฝ๋" TEXT, "ํ์ฌ๊ฐ" REAL, "์๊ฐ" REAL, "๊ณ ๊ฐ" REAL,' \
'"์ ๊ฐ" REAL, "๋ฑ๋ฝ์จ" REAL, "๋น์ผ๊ฑฐ๋๋๊ธ" REAL, "์ฒด๊ฒฐ๊ฐ๋" REAL, "์ด๋น๋งค์์๋" REAL, "์ด๋น๋งค๋์๋" REAL,' \
'"VIํด์ ์๊ฐ" TEXT, "VI์๋5ํธ๊ฐ" REAL, "๋งค๋์ด์๋" REAL, "๋งค์์ด์๋" REAL, "๋งค๋ํธ๊ฐ5" REAL, "๋งค๋ํธ๊ฐ4" REAL,' \
'"๋งค๋ํธ๊ฐ3" REAL, "๋งค๋ํธ๊ฐ2" REAL, "๋งค๋ํธ๊ฐ1" REAL, "๋งค์ํธ๊ฐ1" REAL, "๋งค์ํธ๊ฐ2" REAL, "๋งค์ํธ๊ฐ3" REAL,' \
'"๋งค์ํธ๊ฐ4" REAL, "๋งค์ํธ๊ฐ5" REAL, "๋งค๋์๋5" REAL, "๋งค๋์๋4" REAL, "๋งค๋์๋3" REAL, "๋งค๋์๋2" REAL,' \
'"๋งค๋์๋1" REAL, "๋งค์์๋1" REAL, "๋งค์์๋2" REAL, "๋งค์์๋3" REAL, "๋งค์์๋4" REAL, "๋งค์์๋5" REAL);'
query_create_dist = \
'CREATE TABLE IF NOT EXISTS "dist" (uid integer primary key autoincrement, cnt integer,' \
' reg_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL);'
query_create_distchk = \
'CREATE TABLE IF NOT EXISTS "dist_chk" (uid integer primary key autoincrement, cnt integer,' \
'reg_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL);'
s = 'CREATE TRIGGER IF NOT EXISTS "dist_trigger" INSERT ON "dist" BEGIN INSERT INTO "dist_chk" ("cnt") values (1);\n'
for i in range(len(list_stock_table)):
s += 'INSERT INTO "' + list_stock_table[i] + '" SELECT ' + const_str + ' FROM temp WHERE ์ข
๋ชฉ์ฝ๋ = "' + \
list_stock_table[i] + '";\n'
s += 'DELETE FROM temp;\n'
s += 'INSERT INTO "dist_chk" ("cnt") values (2);\n' # ๋๋ฒ๊น
์๋์ธก์ ์ฉ
s += 'END;\n'
query_create_trigger = s
self.cur2.execute(query_create_temp)
self.cur2.execute(query_create_dist)
self.cur2.execute(query_create_distchk)
self.cur2.execute(query_create_trigger)
def remove_trigger(self):
try:
self.cur2.execute('drop trigger dist_trigger;')
except sqlite3.OperationalError:
pass
```
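create_trigger implements a fan-out pattern: tick rows for all stocks are bulk-inserted into a temp table, and a single insert into dist fires a trigger that copies each row into its per-stock table and clears temp, so only one write transaction is needed per batch. A minimal in-memory sketch of the same pattern with one stock table (table and column names here are illustrative):
```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE "005930" (ts TEXT, price REAL)')
con.execute('CREATE TABLE temp (ts TEXT, code TEXT, price REAL)')
con.execute('CREATE TABLE dist (uid INTEGER PRIMARY KEY AUTOINCREMENT, cnt INTEGER)')
con.execute('''CREATE TRIGGER dist_trigger INSERT ON dist BEGIN
    INSERT INTO "005930" SELECT ts, price FROM temp WHERE code = '005930';
    DELETE FROM temp;
END;''')
con.executemany('INSERT INTO temp VALUES (?, ?, ?)',
                [('09:00:00', '005930', 70000.0), ('09:00:01', '005930', 70100.0)])
con.execute('INSERT INTO dist (cnt) VALUES (1)')  # one insert distributes the whole batch
print(con.execute('SELECT COUNT(*) FROM "005930"').fetchone()[0])  # 2
```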
|
{
"source": "JeonHyeongJunKW/DORNG",
"score": 3
}
|
#### File: JeonHyeongJunKW/DORNG/MatchMaker.py
```python
import torch
import torch.nn.functional as F
def find_local_matches(desc1, desc2, kernel_size=9):
# Computes the correlation between each pixel on desc1 with all neighbors
# inside a window of size (kernel_size, kernel_size) on desc2. The match
    # vector is then computed by linking each pixel on desc1 with
    # the pixel on desc2 that has the highest correlation.
    #
    # This approach requires a lot of memory to build the unfolded descriptor.
# A better approach is to use the Correlation package from e.g.
# https://github.com/NVIDIA/flownet2-pytorch/tree/master/networks/correlation_package
desc2_unfolded = F.unfold(desc2, kernel_size, padding=kernel_size//2)
desc2_unfolded = desc2_unfolded.reshape(
1, desc2.shape[1], kernel_size*kernel_size, desc2.shape[2], desc2.shape[3])
desc1 = desc1.unsqueeze(dim=2)
correlation = torch.sum(desc1 * desc2_unfolded, dim=1)
_, match_idx = torch.max(correlation, dim=1)
    hmatch = torch.fmod(match_idx, kernel_size) - kernel_size // 2  # horizontal offset (not "height")
    vmatch = match_idx // kernel_size - kernel_size // 2  # vertical offset
matches = torch.cat((hmatch, vmatch), dim=0)
return matches
```
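A small usage example with random, L2-normalized descriptors (shapes are illustrative); the returned tensor stacks the horizontal and vertical offsets of the best local match for every pixel:
```python
import torch
import torch.nn.functional as F

desc1 = F.normalize(torch.randn(1, 32, 64, 48), dim=1)
desc2 = F.normalize(torch.randn(1, 32, 64, 48), dim=1)
matches = find_local_matches(desc1, desc2, kernel_size=9)
print(matches.shape)  # torch.Size([2, 64, 48]) -> (horizontal, vertical) offset per pixel
```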
|
{
"source": "Jeonhyunil/Dss8",
"score": 4
}
|
#### File: Dss8/Baekjoon/quick_sort.py
```python
def left_side(s):
    if len(s) <= 1:  # base case: a sequence of length 0 or 1 is already sorted
        return s
    pivot = s[len(s)//2]  # use the middle element as the pivot
    middle = [x for x in s if x == pivot]  # the pivot value may occur more than once, so collect all of them
    left = [x for x in s if x < pivot]  # elements smaller than the pivot
    right = [x for x in s if x > pivot]  # elements larger than the pivot
    return left_side(left) + middle + right_side(right)  # recurse on each side with its matching function
def right_side(s):  # same structure as left_side
    if len(s) <= 1:
        return s
    pivot = s[len(s)//2]
    middle = [x for x in s if x == pivot]
    left = [x for x in s if x < pivot]
    right = [x for x in s if x > pivot]
    return left_side(left) + middle + right_side(right)
def quick_sort(s):  # same structure as above
    if len(s) <= 1:  # base case added so that empty or one-element input does not raise IndexError
        return s
    pivot = s[len(s)//2]
    middle = [x for x in s if x == pivot]
    left = [x for x in s if x < pivot]
    right = [x for x in s if x > pivot]
    return left_side(left) + middle + right_side(right)
```
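A quick usage check; all three functions return the same sorted list and duplicates of the pivot are preserved:
```python
data = [5, 3, 8, 1, 3]
print(quick_sort(data))  # [1, 3, 3, 5, 8]
print(left_side(data))   # [1, 3, 3, 5, 8]
```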
|
{
"source": "jeonhyunji/web-crawling",
"score": 3
}
|
#### File: web-crawling/src/crawling_main.py
```python
from bs4 import BeautifulSoup
import requests
import json
import os
import hashlib
import textwrap
import argparse
import logging
from crontab import CronTab
from item.ranking_item import RankingItem
BASE_URL = 'https://news.naver.com/main/ranking/popularDay.nhn?rankingType=popular_day'
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = f'{BASE_DIR}/data.json'
UPDATE_NEWS_URL = 'http://localhost:8080/update_news'
if not os.path.exists(f'{BASE_DIR}/../log'):
os.makedirs(f'{BASE_DIR}/../log')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler(f'{BASE_DIR}/../log/crawling.log')
formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] (%(filename)s:%(lineno)d) > %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
cron = CronTab(user=True)
def convert_to_ranking_item(rankingItemDiv):
rankingThumbDiv = rankingItemDiv.find('div', class_='ranking_thumb')
rankingTextDiv = rankingItemDiv.find('div', class_='ranking_text')
headlineDiv = rankingTextDiv.find('div', class_='ranking_headline')
ledeDiv = rankingTextDiv.find('div', class_='ranking_lede')
officeDiv = rankingTextDiv.find('div', class_='ranking_office')
viewDiv = rankingTextDiv.find('div', class_='ranking_view')
item = RankingItem()
if rankingThumbDiv is not None:
item.image_link = rankingThumbDiv.a.img.get('src')
item.link = 'https://news.naver.com' + headlineDiv.a.get('href')
item.headline = headlineDiv.a.get('title')
item.lede = ledeDiv.text.strip()
item.office = officeDiv.text
item.view = int(viewDiv.text.replace(',', ''))
return item
def get_crawling_data(sectionId=105, num=30, date=None):
    url = f'{BASE_URL}&sectionId={sectionId}'
if date:
url = f'{url}&date={date}'
# get html
response = requests.get(url)
logger.info("requests.get(url), url: " + url)
html = response.text
# parse html & get python object
soup = BeautifulSoup(html, 'html.parser')
rankingItems = soup.find_all('li', class_='ranking_item')
# convert to RankingItem
rankingItemList = []
for idx in range(0, num):
item = convert_to_ranking_item(rankingItems[idx])
item.rank = idx + 1
rankingItemList.append(item)
return rankingItemList
def process_crawling_data(ranking_list):
# get before ranking from data.json
before_ranking_dict = read_json_datafile()
# set previous item and set new ranking
ranking_dict = {}
for item in ranking_list:
hash_id_obj = hashlib.sha256()
hash_id_obj.update(item.headline.encode('UTF-8'))
id = hash_id_obj.hexdigest()
# id = hash(item.link)
item.id = id
if before_ranking_dict.get(id) is None:
item.previous_item = None
else:
item.previous_item = RankingItem.from_dict(
before_ranking_dict.get(id))
item.previous_item.previous_item = None
pass
ranking_dict[id] = item.to_dict()
# write new ranking to data.json
write_json_datafile(ranking_dict)
def read_json_datafile():
if os.path.isfile(DATA_FILE):
with open(DATA_FILE, 'r') as data_file:
before_raw_json = data_file.read().encode('utf-8')
before_ranking_json = json.loads(before_raw_json)
return before_ranking_json
else:
return {}
def write_json_datafile(ranking_dict):
with open(DATA_FILE, 'w') as data_file:
json.dump(ranking_dict, data_file, ensure_ascii=False)
def notify_to_server():
response = requests.get(UPDATE_NEWS_URL)
logger.info("requests.get(UPDATE_NEWS_URL), UPDATE_NEWS_URL: " + UPDATE_NEWS_URL)
logger.info(f"UPDATE_NEWS response status code: {response.status_code}")
return response
def cron_job_start():
if cron_job_ls() is True:
print("Already running crawling daemon")
else:
job = cron.new(command=f'python {BASE_DIR}/crawling_main.py', comment="crawling-daemon")
job.minute.every(1)
cron.write()
print("Start crawling daemon")
def cron_job_stop():
cron.remove_all()
cron.write()
print("Stop crawling daemon")
def cron_job_ls():
if len(cron) == 0:
return False
else:
return True
def main():
# get ranking data
ranking_list = get_crawling_data()
# process data
process_crawling_data(ranking_list)
logger.info("RUN Crawling, update crawling data(data.json)")
# notify to server
# notify_to_server()
if __name__ == "__main__":
parser = argparse.ArgumentParser(usage='%(prog)s -c [command]')
parser.add_argument('-c', metavar='-c', type=str, nargs='?',
help='command for processing crontab', choices=['start', 'stop', 'status'])
args = parser.parse_args()
if args.c is None:
main()
elif args.c == 'start':
cron_job_start()
elif args.c == 'stop':
cron_job_stop()
else:
print(cron_job_ls())
```
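process_crawling_data keys each article by the SHA-256 of its headline so the same story keeps the same id across crawls; a standalone check of that scheme:
```python
import hashlib

headline = "Example headline"
item_id = hashlib.sha256(headline.encode('UTF-8')).hexdigest()
print(len(item_id), item_id[:12])  # 64 hex characters, identical on every run for the same headline
```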
#### File: src/item/ranking_item.py
```python
class RankingItem(object):
id = 0
rank = 0
image_link = None
link = None
headline = None
lede = None
office = None
view = 0
previous_item = None
def to_dict(self):
return {
'id': self.id,
'rank': self.rank,
'image_link': self.image_link,
'link': self.link,
'headline': self.headline,
'lede': self.lede,
'office': self.office,
'view': self.view,
'previous_item': {} if self.previous_item is None else self.previous_item.to_dict()
}
@staticmethod
def from_dict(dictObj):
item = RankingItem()
item.id = dictObj['id']
item.rank = dictObj['rank']
item.image_link = dictObj['image_link']
item.link = dictObj['link']
item.headline = dictObj['headline']
item.lede = dictObj['lede']
item.office = dictObj['office']
item.view = dictObj['view']
item.previous_item = None if dictObj['previous_item'] == {} else RankingItem.from_dict(dictObj['previous_item'])
return item
def print_item(self):
print('======================== print_item ========================')
print(f' id: {self.id}')
print(f' rank: {self.rank}')
print(f' image_link: {self.image_link}')
print(f' link: {self.link}')
print(f' headline: {self.headline}')
print(f' lede: {self.lede}')
print(f' office: {self.office}')
print(f' view: {self.view}')
```
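to_dict and from_dict are symmetric, so an item survives a round trip through the JSON data file; a minimal check (fields chosen arbitrarily):
```python
item = RankingItem()
item.id, item.rank, item.headline, item.view = 'abc', 1, 'Hello', 1234
restored = RankingItem.from_dict(item.to_dict())
print(restored.rank, restored.headline)  # 1 Hello
```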
#### File: web-crawling/src/test_kakao_bot_main.py
```python
import unittest
class TestFlask(unittest.TestCase):
def setUp(self):
return super().setUp()
```
|
{
"source": "JeonMoonSu/mask_rcnn_person_size",
"score": 2
}
|
#### File: JeonMoonSu/mask_rcnn_person_size/afreecatv.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt,QThread
from PyQt5 import QtCore,QtGui,QtWidgets,uic
import cv2
import os
import sys
import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import skimage
import threading
import time
import queue
from mrcnn import visualize
from mrcnn.visualize import display_instances
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from pose import OpenPoseImage
from pose.OpenPoseImage import GetPersonPoint
matplotlib.use('Agg')
# epoch number
EPOCH_NUM = 10
SP_EPOCH = 100
ROOT_DIR = os.path.abspath("../../")
DATASET_DIR = ""
WEIGHTS_PATH = ""
FILE_PATH = ""
active =False
running = False
capture_thread = None
form_class = uic.loadUiType("simple.ui")[0]
q = queue.Queue()
q2 = queue.Queue()
t_model = None
num=0
num2=0
sys.path.append(ROOT_DIR)
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR,"mask_rcnn_coco.h5")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR,"logs")
class VThread(QThread):
def __init__(self):
QThread.__init__(self)
def run(self):
global q
global running
capture = cv2.VideoCapture(0)
while(running):
frame = {}
plt.clf()
retval, img = capture.read()
if not retval:
break
frame["img"] = img
q.put(frame)
capture.release()
class OwnImageWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(OwnImageWidget, self).__init__(parent)
self.image = None
self.setMinimumSize(800,600)
def setImage(self, image):
self.image = image
self.update()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
if self.image:
qp.drawImage(QtCore.QPoint(0, 0), self.image)
print("Paint Event")
qp.end()
class TestWindow(QDialog,form_class):
def measureClicked(self):
global active
global running
if running:
active=True
def runClicked(self):
global running
running = True
if self.capture_thread.isRunning():
self.capture_thread.terminate()
self.capture_thread.wait()
self.capture_thread.start()
else:
self.capture_thread.start()
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
global q
q.queue.clear()
class InferenceConfig(PersonConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
#Make inference model
self.capture_thread = VThread()
self.config = InferenceConfig()
self.config.display()
self.model = modellib.MaskRCNN(mode="inference",config = self.config,model_dir = DEFAULT_LOGS_DIR)
#Widget
self.label1 = QLabel("<NAME>")
self.label2 = QLabel("๋ณํ์ ")
self.label1.setAlignment(Qt.AlignCenter)
self.label2.setAlignment(Qt.AlignCenter)
self.table = QTableWidget(12,4,self)
self.table.setHorizontalHeaderLabels(["Height","Weight","Size(Top)","Size(Pants)"])
self.play_button = QPushButton("Play Video")
self.play_button.clicked.connect(self.runClicked)
self.play_button.setToolTip('Get the frame by webcam')
self.measure_button = QPushButton("Measure")
self.measure_button.clicked.connect(self.measureClicked)
self.measure_button.setToolTip('Measuring instance profile')
self.ImgWidget = OwnImageWidget()
self.window_width = self.ImgWidget.frameSize().width()
self.window_height = self.ImgWidget.frameSize().height()
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
#Layout
layout = QGridLayout()
layout.addWidget(self.label1,0,0)
layout.addWidget(self.label2,0,1)
layout.addWidget(self.ImgWidget,1,0)
layout.addWidget(self.table,1,1)
horLayout = QHBoxLayout()
horLayout.addStretch(1)
horLayout.addWidget(self.play_button)
horLayout.addStretch(1)
layout.addLayout(horLayout,2,0)
horLayout2 = QHBoxLayout()
horLayout2.addStretch(1)
horLayout2.addWidget(self.measure_button)
horLayout2.addStretch(1)
layout.addLayout(horLayout2,2,1)
self.setLayout(layout)
self.setGeometry(200,200,1350,650)
#Timer
self.timer = QtCore.QTimer(self)
self.timer.setInterval(10)
self.timer.timeout.connect(self.update_frame)
self.timer.start()
def retranslateUi(self,TestQFileDialog):
_translate = QtCore.QCoreApplication.translate
TestQFileDialog.setWindowTitle(_translate("TestQFileDialog","Dialog"))
def update_frame(self):
if not q.empty():
frame = q.get()
img = frame["img"]
img = cv2.resize(img, None, fx=1.0, fy=1.0, interpolation = cv2.INTER_NEAREST)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height, width, bpc = img.shape
bpl = bpc * width
image = QtGui.QImage(img.data, width, height, bpl, QtGui.QImage.Format_RGB888)
self.ImgWidget.setImage(image)
def closeEvent(self,event):
global running
running = False
print("end testing")
#train
class TrainWindow(QDialog):
def _open_file_dialog1(self):
directory = str(QFileDialog.getExistingDirectory())
self.lineEdit1.setText('{}'.format(directory))
def _open_file_dialog2(self):
directory = QFileDialog.getOpenFileName(self,'Open file','/home/default')
self.lineEdit2.setText(directory[0])
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
#train model
self.config = PersonConfig()
self.config.display()
self.model = modellib.MaskRCNN(mode = "training", config = self.config, model_dir = DEFAULT_LOGS_DIR)
#pyqt
layout = QGridLayout()
boxlayout = QBoxLayout(QBoxLayout.TopToBottom,self)
self.toolButtonOpenDialog1 = QToolButton(self)
self.toolButtonOpenDialog1.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog1.setObjectName("toolButtonOpenDialog")
self.toolButtonOpenDialog1.clicked.connect(self._open_file_dialog1)
self.toolButtonOpenDialog2 = QToolButton(self)
self.toolButtonOpenDialog2.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog2.setObjectName("toolButtonOpenDialog2")
self.toolButtonOpenDialog2.clicked.connect(self._open_file_dialog2)
self.lineEdit1 = QLineEdit(self)
self.lineEdit1.setEnabled(False)
self.lineEdit1.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.lineEdit2 = QLineEdit(self)
self.lineEdit2.setEnabled(False)
self.lineEdit2.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
self.setGeometry(1100,200,300,100)
self.setWindowTitle("train")
btn1 = QPushButton("Start Training")
btn2 = QPushButton("Close")
btn1.clicked.connect(self.trainClicked)
btn2.clicked.connect(self.closeClicked)
self.groupbox = QGroupBox("",self)
self.groupbox.setLayout(boxlayout)
self.chk1 = QRadioButton("COCO",self)
self.chk2 = QRadioButton("LAST",self)
self.chk3 = QRadioButton("Other",self)
boxlayout.addWidget(self.chk1)
boxlayout.addWidget(self.chk2)
boxlayout.addWidget(self.chk3)
label1 = QLabel("Epoch : ")
label2 = QLabel("Numbers per epoch : ")
label3 = QLabel("Location of dataset : ")
label4 = QLabel("Weight : ")
self.spinBox1 = QSpinBox(self)
self.spinBox2 = QSpinBox(self)
self.spinBox1.setMaximum(1000)
self.spinBox2.setMaximum(1000)
self.spinBox1.setValue(10)
self.spinBox2.setValue(100)
layout.addWidget(label1,0,0)
layout.addWidget(label2,1,0)
layout.addWidget(label3,2,0)
layout.addWidget(label4,4,0)
layout.addWidget(self.spinBox1,0,1)
layout.addWidget(self.spinBox2,1,1)
layout.addWidget(self.toolButtonOpenDialog1,2,1)
layout.addWidget(self.lineEdit1,3,0,1,2)
layout.addWidget(self.groupbox,4,1)
layout.addWidget(self.toolButtonOpenDialog2,5,1)
layout.addWidget(self.lineEdit2,6,0,1,2)
layout.addWidget(btn1,7,0)
layout.addWidget(btn2,7,1)
self.setLayout(layout)
self.setGeometry(300,300,300,200)
def trainClicked(self):
global EPOCH_NUM
global SP_EPOCH
EPOCH_NUM = self.spinBox1.value()
SP_EPOCH = self.spinBox2.value()
self.config.STEPS_PER_EPOCH = SP_EPOCH
#dataset path
global DATASET_DIR
DATASET_DIR = self.lineEdit1.text()
if DATASET_DIR == "":
QMessageBox.about(self,"error","Set dataset directory!")
#weights path
global WEIGHTS_PATH
WEIGHTS_PATH = self.lineEdit2.text()
if self.chk1.isChecked():
WEIGHTS_PATH = COCO_WEIGHTS_PATH
elif self.chk2.isChecked():
WEIGHTS_PATH = self.model.find_last()
elif self.chk3.isChecked():
if WEIGHTS_PATH == "":
QMessageBox.about(self,"error","Set weight directory!")
else:
QMessageBox.about(self,"error","Check weight!")
#load weights
if DATASET_DIR == "" or WEIGHTS_PATH == "":
print("Check before run!")
else:
print("epoch_num is : ",EPOCH_NUM)
print("steps per epoch is : ",SP_EPOCH)
print("dataset directory is : ",DATASET_DIR)
print("weights directory is : ",WEIGHTS_PATH)
if WEIGHTS_PATH == COCO_WEIGHTS_PATH:
self.model.load_weights(WEIGHTS_PATH,by_name = True, exclude=["mrcnn_class_logits","mrcnn_bbox_fc","mrcnn_bbox","mrcnn_mask"])
else:
self.model.load_weights(WEIGHTS_PATH,by_name=True)
train(self.model)
def closeClicked(self):
self.close()
def retranslateUi(self,TestQFileDialog):
_translate = QtCore.QCoreApplication.translate
TestQFileDialog.setWindowTitle(_translate("TestQFileDialog","Dialog"))
self.toolButtonOpenDialog1.setIcon(QtGui.QIcon('./icon.png'))
self.toolButtonOpenDialog2.setIcon(QtGui.QIcon('./icon.png'))
class ExWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
label = QLabel('Mask R-CNN Person',self)
label.setAlignment(Qt.AlignCenter)
font = label.font()
font.setBold(True)
btn1 = QPushButton('Train',self)
btn2 = QPushButton('Test',self)
btn3 = QPushButton('Close',self)
btn1.clicked.connect(self.trainEvent)
btn2.clicked.connect(self.testEvent)
btn3.clicked.connect(self.close)
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(btn1)
vbox.addWidget(btn2)
vbox.addWidget(btn3)
self.setLayout(vbox)
self.setGeometry(800,200,300,300)
self.show()
def testEvent(self):
dia = TestWindow()
dia.exec_()
def trainEvent(self):
dia = TrainWindow()
dia.exec_()
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class PersonConfig(Config):
NAME = "person"
GPU_COUNT = 1
IMAGES_PER_GPU = 2
NUM_CLASSES = 1 + 1 # Background + objects
STEPS_PER_EPOCH = SP_EPOCH
DETECTION_MIN_CONFIDENCE = 0.9
class PersonDataset(utils.Dataset):
def load_VIA(self, dataset_dir, subset, hc=False):
self.add_class("person", 1, "person")
#self.add_class("pig", 2, "lying_pig")
assert subset in ["train","val"]
dataset_dir = os.path.join(dataset_dir, subset)
annotations1 = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations1.values())
annotations = [a for a in annotations if a['regions']]
for a in annotations:
# Get the x, y coordinaets of points of the polygons that make up
# the outline of each object instance. There are stores in the
# shape_attributes (see json format above)
polygons = [r['shape_attributes'] for r in a['regions'].values()]
#names = [r['region_attributes'] for r in a['regions'].values()]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
# the image. This is only managable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"person",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons)
#names=names)
def load_mask(self, image_id):
image_info = self.image_info[image_id]
if image_info["source"] != "person":
return super(self.__class__, self).load_mask(image_id)
info = self.image_info[image_id]
#class_names = info["names"]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])], dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
#class_ids = np.zeros([len(info["polygons"])])
#for i, p in enumerate(class_names):
#if p['name'] == 'standing_pig':
#class_ids[i] = 1
#elif p['name'] == 'lying_pig':
#class_ids[i] = 2
#assert code here to extend to other labels
#class_ids = class_ids.astype(int)
# Return mask, and array of class IDs of each instance. Since we have
# one class ID only, we return an array of 1s
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
#return mask.astype(np.bool), class_ids
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "person":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def detect_and_color_splash(model, image_path=None, video_path=None, out_dir=''):
assert image_path or video_path
class_names = ['BG', 'person']
# Image or video?
if image_path:
# Read image
image = skimage.io.imread(FILE_PATH)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color splash and save
masked_image = visualize.display_instances2(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'],"image")
return masked_image
elif video_path:
# Video capture
vcapture = cv2.VideoCapture(video_path)
# width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
count = 0
success = True
#For video, we wish classes keep the same mask in frames, generate colors for masks
colors = visualize.random_colors(len(class_names))
while success:
print("frame: ", count)
plt.clf()
plt.close()
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
r = model.detect([image], verbose=0)[0]
# Color splash
# splash = color_splash(image, r['masks'])
splash = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], colors=colors,making_video=True)
# Add image to video writer
cv2.imshow('img',splash)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
count += 1
vcapture.release()
def train(model):
dataset_train = PersonDataset()
dataset_train.load_VIA(DATASET_DIR, "train")
dataset_train.prepare()
dataset_val = PersonDataset()
dataset_val.load_VIA(DATASET_DIR, "val")
dataset_val.prepare()
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=PersonConfig().LEARNING_RATE,
epochs=model.epoch+EPOCH_NUM,
layers='heads')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = ExWindow()
sys.exit(app.exec_())
```
|
{
"source": "JeonMoonSu/mask_r_cnn_pigdetection",
"score": 2
}
|
#### File: JeonMoonSu/mask_r_cnn_pigdetection/main.py
```python
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5 import QtCore,QtGui,QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import cv2
import os
import sys
import json
import datetime
import numpy as np
import matplotlib.pyplot as plt
import skimage
from mrcnn import visualize
from mrcnn.visualize import display_instances
# epoch number
EPOCH_NUM = 10
SP_EPOCH = 100
ROOT_DIR = os.path.abspath("../../")
DATASET_DIR = ""
WEIGHTS_PATH = ""
IMAGE_PATH = ""
sys.path.append(ROOT_DIR)
from mrcnn.config import Config
from mrcnn import model as modellib, utils
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR,"mask_rcnn_coco.h5")
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR,"logs")
class TestWindow(QDialog):
def _open_file_dialog1(self):
directory = QFileDialog.getOpenFileName(self,'Open file','/home/default/logs')
self.lineEdit1.setText(directory[0])
def _open_file_dialog2(self):
directory = QFileDialog.getOpenFileName(self,'Open file','/home/default')
self.lineEdit2.setText(directory[0])
def runClicked(self):
global WEIGHTS_PATH
global IMAGE_PATH
WEIGHTS_PATH = self.lineEdit1.text()
IMAGE_PATH = self.lineEdit2.text()
if WEIGHTS_PATH == "" or IMAGE_PATH == "":
QMessageBox.about(self,"error","You missed something")
else:
self.model.load_weights(WEIGHTS_PATH,by_name=True)
masked_image=detect_and_color_splash(self.model, image_path=IMAGE_PATH,video_path="./")
masked_image.show()
print("๋")
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
class InferenceConfig(PigConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
self.config = InferenceConfig()
self.config.display()
self.model = modellib.MaskRCNN(mode="inference",config = self.config,model_dir = DEFAULT_LOGS_DIR)
label1 = QLabel('Weight:')
label2 = QLabel('Image:')
self.lineEdit1 = QLineEdit()
self.lineEdit1.setEnabled(False)
self.lineEdit1.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.lineEdit2 = QLineEdit()
self.lineEdit2.setEnabled(False)
self.lineEdit2.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.toolButtonOpenDialog1 = QToolButton(self)
self.toolButtonOpenDialog1.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog1.setObjectName("toolButtonOpenDialog")
self.toolButtonOpenDialog1.clicked.connect(self._open_file_dialog1)
self.toolButtonOpenDialog2 = QToolButton(self)
self.toolButtonOpenDialog2.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog2.setObjectName("toolButtonOpenDialog")
self.toolButtonOpenDialog2.clicked.connect(self._open_file_dialog2)
self.pushButton = QPushButton("run test")
self.pushButton.clicked.connect(self.runClicked)
self.fig = plt.Figure()
self.canvas = FigureCanvas(self.fig)
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
#Layout
myLayout1 = QHBoxLayout()
myLayout2 = QHBoxLayout()
myLayout1.addWidget(label1)
myLayout1.addWidget(self.toolButtonOpenDialog1)
myLayout2.addWidget(label2)
myLayout2.addWidget(self.toolButtonOpenDialog2)
self.leftLayout = QVBoxLayout()
self.leftLayout.addWidget(self.canvas)
rightLayout = QVBoxLayout()
rightLayout.addLayout(myLayout1)
rightLayout.addWidget(self.lineEdit1)
rightLayout.addLayout(myLayout2)
rightLayout.addWidget(self.lineEdit2)
rightLayout.addWidget(self.pushButton)
rightLayout.addStretch(1)
layout = QHBoxLayout()
layout.addLayout(self.leftLayout)
layout.addLayout(rightLayout)
layout.setStretchFactor(self.leftLayout,4)
layout.setStretchFactor(rightLayout,1)
self.setLayout(layout)
self.setGeometry(100,100,900,600)
def retranslateUi(self,TestQFileDialog):
_translate = QtCore.QCoreApplication.translate
TestQFileDialog.setWindowTitle(_translate("TestQFileDialog","Dialog"))
self.toolButtonOpenDialog1.setIcon(QtGui.QIcon('./icon.png'))
self.toolButtonOpenDialog2.setIcon(QtGui.QIcon('./icon.png'))
#train
class TrainWindow(QDialog):
def _open_file_dialog1(self):
directory = str(QFileDialog.getExistingDirectory())
self.lineEdit1.setText('{}'.format(directory))
def _open_file_dialog2(self):
directory = QFileDialog.getOpenFileName(self,'Open file','/home/default')
self.lineEdit2.setText(directory[0])
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
#train model
self.config = PigConfig()
self.config.display()
self.model = modellib.MaskRCNN(mode = "training", config = self.config, model_dir = DEFAULT_LOGS_DIR)
#pyqt
layout = QGridLayout()
boxlayout = QBoxLayout(QBoxLayout.TopToBottom,self)
self.toolButtonOpenDialog1 = QToolButton(self)
self.toolButtonOpenDialog1.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog1.setObjectName("toolButtonOpenDialog")
self.toolButtonOpenDialog1.clicked.connect(self._open_file_dialog1)
self.toolButtonOpenDialog2 = QToolButton(self)
self.toolButtonOpenDialog2.setGeometry(QtCore.QRect(210,10,25,19))
self.toolButtonOpenDialog2.setObjectName("toolButtonOpenDialog2")
self.toolButtonOpenDialog2.clicked.connect(self._open_file_dialog2)
self.lineEdit1 = QLineEdit(self)
self.lineEdit1.setEnabled(False)
self.lineEdit1.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.lineEdit2 = QLineEdit(self)
self.lineEdit2.setEnabled(False)
self.lineEdit2.setGeometry(QtCore.QRect(10, 10, 191, 20))
self.retranslateUi(self)
QtCore.QMetaObject.connectSlotsByName(self)
self.setGeometry(1100,200,300,100)
self.setWindowTitle("train")
btn1 = QPushButton("Start Training")
btn2 = QPushButton("Close")
btn1.clicked.connect(self.trainClicked)
btn2.clicked.connect(self.closeClicked)
self.groupbox = QGroupBox("",self)
self.groupbox.setLayout(boxlayout)
self.chk1 = QRadioButton("COCO",self)
self.chk2 = QRadioButton("LAST",self)
self.chk3 = QRadioButton("Other",self)
boxlayout.addWidget(self.chk1)
boxlayout.addWidget(self.chk2)
boxlayout.addWidget(self.chk3)
label1 = QLabel("Epoch : ")
label2 = QLabel("Numbers per epoch : ")
label3 = QLabel("Location of dataset : ")
label4 = QLabel("Weight : ")
self.spinBox1 = QSpinBox(self)
self.spinBox2 = QSpinBox(self)
self.spinBox1.setMaximum(1000)
self.spinBox2.setMaximum(1000)
self.spinBox1.setValue(10)
self.spinBox2.setValue(100)
layout.addWidget(label1,0,0)
layout.addWidget(label2,1,0)
layout.addWidget(label3,2,0)
layout.addWidget(label4,4,0)
layout.addWidget(self.spinBox1,0,1)
layout.addWidget(self.spinBox2,1,1)
layout.addWidget(self.toolButtonOpenDialog1,2,1)
layout.addWidget(self.lineEdit1,3,0,1,2)
layout.addWidget(self.groupbox,4,1)
layout.addWidget(self.toolButtonOpenDialog2,5,1)
layout.addWidget(self.lineEdit2,6,0,1,2)
layout.addWidget(btn1,7,0)
layout.addWidget(btn2,7,1)
self.setLayout(layout)
self.setGeometry(300,300,300,200)
def trainClicked(self):
global EPOCH_NUM
global SP_EPOCH
EPOCH_NUM = self.spinBox1.value()
SP_EPOCH = self.spinBox2.value()
self.config.STEPS_PER_EPOCH = SP_EPOCH
#dataset path
global DATASET_DIR
DATASET_DIR = self.lineEdit1.text()
if DATASET_DIR == "":
QMessageBox.about(self,"error","Set dataset directory!")
#weights path
global WEIGHTS_PATH
WEIGHTS_PATH = self.lineEdit2.text()
if self.chk1.isChecked():
WEIGHTS_PATH = COCO_WEIGHTS_PATH
elif self.chk2.isChecked():
WEIGHTS_PATH = self.model.find_last()
elif self.chk3.isChecked():
if WEIGHTS_PATH == "":
QMessageBox.about(self,"error","Set weight directory!")
else:
QMessageBox.about(self,"error","Check weight!")
#load weights
if DATASET_DIR == "" or WEIGHTS_PATH == "":
print("ํ๊ธฐ์ ์ ์ข ์ฒดํฌ์ขํด๋ผ ")
else:
print("epoch_num is : ",EPOCH_NUM)
print("steps per epoch is : ",SP_EPOCH)
print("dataset directory is : ",DATASET_DIR)
print("weights directory is : ",WEIGHTS_PATH)
if WEIGHTS_PATH == COCO_WEIGHTS_PATH:
self.model.load_weights(WEIGHTS_PATH,by_name = True, exclude=["mrcnn_class_logits","mrcnn_bbox_fc","mrcnn_bbox","mrcnn_mask"])
else:
self.model.load_weights(WEIGHTS_PATH,by_name=True)
train(self.model)
self.close()
def closeClicked(self):
self.close()
def retranslateUi(self,TestQFileDialog):
_translate = QtCore.QCoreApplication.translate
TestQFileDialog.setWindowTitle(_translate("TestQFileDialog","Dialog"))
self.toolButtonOpenDialog1.setText(_translate("TestQfileDialog","..."))
self.toolButtonOpenDialog2.setText(_translate("TestQfileDialog2","..."))
class ExWindow(QWidget):
def __init__(self):
super().__init__()
self.init_ui()
def init_ui(self):
label = QLabel('Mask R-CNN PIG',self)
label.setAlignment(Qt.AlignCenter)
font = label.font()
font.setBold(True)
btn1 = QPushButton('Train',self)
btn2 = QPushButton('Test',self)
btn3 = QPushButton('Close',self)
btn1.clicked.connect(self.trainEvent)
btn2.clicked.connect(self.testEvent)
btn3.clicked.connect(self.close)
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(btn1)
vbox.addWidget(btn2)
vbox.addWidget(btn3)
self.setLayout(vbox)
self.setGeometry(800,200,300,300)
self.show()
def testEvent(self):
dia = TestWindow()
dia.exec_()
def trainEvent(self):
dia = TrainWindow()
dia.exec_()
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
class PigConfig(Config):
NAME = "pig"
GPU_COUNT = 1
IMAGES_PER_GPU = 4
NUM_CLASSES = 1 + 2 # Background + objects
STEPS_PER_EPOCH = SP_EPOCH
DETECTION_MIN_CONFIDENCE = 0.9
class PigDataset(utils.Dataset):
def load_VIA(self, dataset_dir, subset, hc=False):
self.add_class("pig", 1, "standing_pig")
self.add_class("pig", 2, "lying_pig")
assert subset in ["train","val"]
dataset_dir = os.path.join(dataset_dir, subset)
annotations1 = json.load(open(os.path.join(dataset_dir, "via_region_data.json")))
annotations = list(annotations1.values())
annotations = [a for a in annotations if a['regions']]
for a in annotations:
            # Get the x, y coordinates of the points of the polygons that make
            # up the outline of each object instance. These are stored in the
            # shape_attributes (see the JSON format above)
polygons = [r['shape_attributes'] for r in a['regions'].values()]
names = [r['region_attributes'] for r in a['regions'].values()]
# load_mask() needs the image size to convert polygons to masks.
# Unfortunately, VIA doesn't include it in JSON, so we must read
            # the image. This is only manageable since the dataset is tiny.
image_path = os.path.join(dataset_dir, a['filename'])
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
"pig",
image_id=a['filename'], # use file name as a unique image id
path=image_path,
width=width, height=height,
polygons=polygons,
names=names)
def load_mask(self, image_id):
image_info = self.image_info[image_id]
if image_info["source"] != "pig":
return super(self.__class__, self).load_mask(image_id)
info = self.image_info[image_id]
class_names = info["names"]
mask = np.zeros([info["height"], info["width"], len(info["polygons"])], dtype=np.uint8)
for i, p in enumerate(info["polygons"]):
rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
mask[rr, cc, i] = 1
class_ids = np.zeros([len(info["polygons"])])
for i, p in enumerate(class_names):
if p['name'] == 'standing_pig':
class_ids[i] = 1
elif p['name'] == 'lying_pig':
class_ids[i] = 2
#assert code here to extend to other labels
class_ids = class_ids.astype(int)
        # Return the mask and an array of class IDs for each instance
        # (1 = standing_pig, 2 = lying_pig).
return mask.astype(np.bool), class_ids
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == "pig":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def detect_and_color_splash(model, image_path=None, video_path=None, out_dir=''):
assert image_path or video_path
class_names = ['BG', 'standing_pig', 'lying_pig']
# Image or video?
if image_path:
print("Running on {}".format(IMAGE_PATH))
# Read image
image = skimage.io.imread(IMAGE_PATH)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color splash and save
masked_image = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'],"image")
return masked_image
elif video_path:
# Video capture
vcapture = cv2.VideoCapture(video_path)
# width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
# height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
width = 1600
height = 1600
fps = vcapture.get(cv2.CAP_PROP_FPS)
# Define codec and create video writer
file_name = "splash_{:%Y%m%dT%H%M%S}.wmv".format(datetime.datetime.now())
vwriter = cv2.VideoWriter(file_name,
cv2.VideoWriter_fourcc(*'MJPG'),
fps, (width, height))
count = 0
success = True
        # For video, we want each class to keep the same mask color across frames, so generate the colors once
colors = visualize.random_colors(len(class_names))
while success:
print("frame: ", count)
plt.clf()
plt.close()
success, image = vcapture.read()
if success:
# OpenCV returns images as BGR, convert to RGB
image = image[..., ::-1]
# Detect objects
r = model.detect([image], verbose=0)[0]
# Color splash
# splash = color_splash(image, r['masks'])
splash = visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'], colors=colors, making_video=True)
# Add image to video writer
vwriter.write(splash)
count += 1
vwriter.release()
def train(model):
dataset_train = PigDataset()
dataset_train.load_VIA(DATASET_DIR, "train")
dataset_train.prepare()
dataset_val = PigDataset()
dataset_val.load_VIA(DATASET_DIR, "val")
dataset_val.prepare()
print("Training network heads")
model.train(dataset_train, dataset_val,
learning_rate=PigConfig().LEARNING_RATE,
epochs=model.epoch+EPOCH_NUM,
layers='heads')
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = ExWindow()
sys.exit(app.exec_())
```
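A brief usage note, inferred from the code above rather than from repository docs: running `python main.py` opens the small launcher window (`ExWindow`); the Train button opens `TrainWindow`, where a VIA-annotated dataset directory, epoch settings, and a starting weight (COCO, the last checkpoint, or a custom file) are chosen, while the Test button opens `TestWindow`, where a trained weight and an image are selected and `detect_and_color_splash` displays the masked result. The script assumes the Mask R-CNN `mrcnn` package and `mask_rcnn_coco.h5` live two directories above, as implied by `ROOT_DIR`.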
|
{
"source": "jeonsworld/MLP-Mixer-Pytorch",
"score": 2
}
|
#### File: MLP-Mixer-Pytorch/models/configs.py
```python
import ml_collections
def get_mixer_b16_config():
"""Returns Mixer-B/16 configuration."""
config = ml_collections.ConfigDict()
config.name = 'Mixer-B_16'
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_dim = 768
config.num_blocks = 12
config.tokens_mlp_dim = 384
config.channels_mlp_dim = 3072
return config
def get_mixer_l16_config():
"""Returns Mixer-L/16 configuration."""
config = ml_collections.ConfigDict()
config.name = 'Mixer-L_16'
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_dim = 1024
config.num_blocks = 24
config.tokens_mlp_dim = 512
config.channels_mlp_dim = 4096
return config
```
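A minimal usage sketch (not part of the repository file above) showing how such a config would typically be consumed; the import path is a guess based on the file location `models/configs.py`, and the field values come straight from `get_mixer_b16_config`.
```python
# A minimal sketch, assuming the configs module above is importable as models.configs.
from models import configs  # hypothetical import path

config = configs.get_mixer_b16_config()
print(config.name)          # 'Mixer-B_16'
print(config.patches.size)  # (16, 16)
print(config.hidden_dim)    # 768
config.num_blocks = 8       # ConfigDict fields can be overridden before building the model
```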
|
{
"source": "Jeontaeyun/python-team-notes",
"score": 3
}
|
#### File: python-team-notes/dynamic-programming/fibo.py
```python
d = [0] * 100
def fibo(x):
if x == 1 or x == 2:
return 1
if d[x] != 0:
return d[x]
print("f(", x, ")")
d[x] = fibo(x-1) + fibo(x-2)
return d[x]
print(fibo(6))
```
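A quick trace of the memoization above (a reading of the code, not repository documentation): calling `fibo(6)` prints `f( 6 )`, `f( 5 )`, `f( 4 )` and `f( 3 )` exactly once each; `fibo(2)` and `fibo(1)` hit the base case, the cached `d[3]` and `d[4]` values are reused instead of recomputed, and the final `print` outputs `8`.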
#### File: python-team-notes/dynamic-programming/to-be-one.py
```python
n = int(input())
# The given integer n satisfies 1 <= n <= 30,000
dynamic_table = [-1] * 30001
# Solve top-down with memoization
def calculator(x):
if x == 1:
dynamic_table[x] = 0
return dynamic_table[x]
# Memoization Code
if dynamic_table[x] != -1:
return dynamic_table[x]
# Memoization Check Code
print("f(", x, ")")
dynamic_table[x-1] = calculator(x-1)
result = dynamic_table[x-1]
if x % 2 == 0:
dynamic_table[x//2] = calculator(x//2)
result = min(dynamic_table[x//2], result)
if x % 3 == 0:
dynamic_table[x//3] = calculator(x//3)
result = min(dynamic_table[x//3], result)
if x % 5 == 0:
dynamic_table[x//5] = calculator(x//5)
result = min(dynamic_table[x//5], result)
return result + 1
print(calculator(n))
```
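A worked example, derived by hand rather than taken from the repository: for n = 26 the optimal sequence is 26 → 25 (subtract 1) → 5 (divide by 5) → 1 (divide by 5), so `calculator(26)` should return 3 after printing the intermediate `f( x )` calls it actually evaluates.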
#### File: python-team-notes/graph/city-divide.py
```python
def find_parent(parent, x):
if parent[x] != x:
parent[x] = find_parent(parent, parent[x])
return parent[x]
def union_city(parent, a, b):
a = find_parent(parent, a)
b = find_parent(parent, b)
if a < b:
parent[b] = a
else:
parent[a] = b
def check_cycle(parent, a, b):
if find_parent(parent, a) == find_parent(parent, b):
return True
else:
return False
def sort_edges(edges):
    # list.sort() sorts the list in place and returns None, so no reassignment is needed
    edges.sort(key=lambda x: x[-1])
def divide_city(edges, cycle_tables):
cost_list = []
sort_edges(edges)
for edge in edges:
a, b, cost = edge
if not check_cycle(cycle_tables, a, b):
union_city(cycle_tables, a, b)
cost_list.append(cost)
result = 0
cost_list.pop(len(cost_list)-1)
for cost in cost_list:
result += cost
return result
v, e = map(int, input().split())
edges = []
cycle_tables = [0] * (v+1)
# Initial Cycle Table
for i in range(1, v+1):
cycle_tables[i] = i
for _ in range(e):
a, b, cost = map(int, input().split())
edges.append((a, b, cost))
result = divide_city(edges, cycle_tables)
print(result)
```
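A hand-built sanity check of `divide_city` (not part of the original repo), assuming the functions above are in scope; vertices are 1..4 and the tuple format is `(a, b, cost)`, matching the input parsing above.
```python
# Hypothetical example graph, checked by hand.
edges = [(1, 2, 1), (2, 3, 2), (3, 4, 3), (1, 4, 4), (2, 4, 5)]
cycle_tables = list(range(5))  # parent[i] = i for vertices 0..4
print(divide_city(edges, cycle_tables))
# Expected output: 3 -- the MST uses the edges of cost 1, 2, 3 and the most
# expensive MST edge (cost 3) is dropped to split the villages into two groups.
```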
#### File: implementation/basic/game-developer.py
```python
n, k = map(int, input().split())
position_data = list(map(int, input().split()))
map_data = [[int(x) for x in input().split()] for _ in range(n)]
# Direction vectors as (row, column) offsets: up, right, down, left
directions = [(-1, 0), (0, 1), (1, 0), (0, -1)]
current_position = [position_data[0], position_data[1]]
current_direction = position_data[2]
count_scan = 0
memoization = []
def turn_left():
global count_scan
global current_direction
count_scan += 1
current_direction -= 1
if current_direction == -1:
current_direction = 3
def check_is_block(row, column):
is_block_row = row < 0 or row >= n
is_block_column = column < 0 or column >= n
return is_block_row or is_block_column
while True:
turn_left()
next_row = current_position[0] + directions[current_direction][0]
next_column = current_position[1] + directions[current_direction][1]
if check_is_block(next_row, next_column):
continue
if not (next_row, next_column) in memoization and map_data[next_row][next_column] == 0:
count_scan = 0
current_position = [next_row, next_column]
memoization.append((next_row, next_column))
if count_scan == 3:
back_row = current_position[0] - directions[current_direction][0]
back_column = current_position[1] - directions[current_direction][1]
if check_is_block(back_row, back_column):
continue
if not map_data[back_row][back_column] == 1:
break
else:
current_position = [back_row, back_column]
solution = len(memoization)
print(solution)
```
#### File: python-team-notes/search/rice-cake-problem.py
```python
n, m = map(int, input().split())
data = list(map(int, input().split()))
data.sort()
maxValue = data[n-1]
def binary_search(array, start, end):
    # Parametric search: find the largest cut height that still yields at least m of rice cake.
    if start > end:
        return end
    mid = (start + end) // 2
    result = 0
    for length in array:
        if length > mid:
            result += length - mid
    if result == m:
        return mid
    if result > m:
        return binary_search(array, mid + 1, end)
    else:
        return binary_search(array, start, mid - 1)
print(binary_search(data, 0, maxValue))
```
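A worked example using the well-known test case for this problem (an assumption, since the repo ships no sample input): with `4 6` and heights `19 15 10 17`, cutting at height 15 leaves 4 + 0 + 0 + 2 = 6 of rice cake, so the script should print 15.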
#### File: python-team-notes/search/search-part.py
```python
n = int(input())
part_list = list(map(int, input().split()))
m = int(input())
require_list = list(map(int, input().split()))
def binary_search(array, target, start, end):
mid = (start + end) // 2
    if start > end:
        return "no"
if array[mid] == target:
return "yes"
if array[mid] > target:
return binary_search(array, target, start, mid - 1)
else:
return binary_search(array, target, mid + 1, end)
return "no"
part_list = sorted(part_list)
result = [binary_search(part_list, i, 0, n - 1) for i in require_list]
for answer in result:
print(answer, end=" ")
```
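A worked example using the standard test case for this problem (an assumption): with 5 parts `8 3 7 9 2` in stock and 3 requested parts `5 7 9`, the expected output is `no yes yes`.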
|
{
"source": "JeorgeReds78/HCap-FeCriJor",
"score": 3
}
|
#### File: JeorgeReds78/HCap-FeCriJor/ejercicioHCAP.py
```python
import cv2
import numpy as np
#Funciรณn de la escala de grises
def escala_gris(A):
B = np.zeros([A.shape[0], A.shape[1]])
for i in range(0, len(A)):
for j in range(0, len(A[0])):
suma = 0
for k in range(0, len(A[0][0])):
suma += A[i][j][k]
suma = int(suma/len(A[0][0]))
B[i][j] = suma
return B
# Convolution function
def convolucion(A, B):
C = np.zeros([A.shape[0]-2, A.shape[1]-2])
for i in range(0, len(A)-2):
for j in range(0, len(A[0])-2):
suma = 0
for x in range(0, len(B)):
for y in range(0, len(B[0])):
suma += A[i+x][j+y]*B[x][y]
if suma > 255:
suma = 255
C[i][j] = suma
return C
# Binary (black-and-white) threshold function
def escala_BN(A):
B = np.zeros([A.shape[0], A.shape[1]])
for i in range(0, len(A)):
for j in range(0, len(A[0])):
if A[i][j] > 128:
B[i][j] = 255
return B
# Zero-padding function (adds a one-pixel border of zeros)
def padding(A):
B = np.zeros((len(A)+2, len(A[0])+2))
for i in range(0, len(A)):
for j in range(0, len(A[0])):
B[i+1][j+1] = A[i][j]
return B
Filtro = [[1, 1, 1],[1, 0, 1],[1, 1, 1]]
Img = cv2.imread('image.jpg')
Img = cv2.cvtColor(Img,cv2.COLOR_BGR2RGB)
Img2 = escala_gris(Img)
cv2.imwrite('gris.jpg', Img2)
Img_pad = padding(Img2)
Img_pad = convolucion(Img_pad, Filtro)
cv2.imwrite('pad.jpg', Img_pad)
Img_sin_pad = convolucion(Img2, Filtro)
cv2.imwrite('sinpad.jpg', Img_sin_pad)
Img_BN = escala_BN(Img2)
cv2.imwrite('blancoYNegro.jpg', Img_BN)
```
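A quick numerical check of the convolution routine (not part of the original exercise), assuming the functions above are in scope.
```python
# Hypothetical sanity check of convolucion with the filter defined above.
A = np.ones((3, 3))
print(convolucion(A, Filtro))
# Expected: [[8.]] -- the eight neighbouring ones each get weight 1 and the
# centre pixel has weight 0 in the filter.
```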
|
{
"source": "jeorjebot/kp-anonymity",
"score": 3
}
|
#### File: kp-anonymity/Utility/draw_stat.py
```python
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
with open(Path('tmp.txt'), 'r') as f:
lines_read = f.readlines()
lines = list()
for line in lines_read:
lines.append(line.split())
labels = list()
naive_time = list()
kapra_time = list()
for index, line in enumerate(lines):
if index % 2 == 0: # naive
labels.append(line[1])
naive_time.append(float(line[2]))
else: # kapra
kapra_time.append(float(line[2]))
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, naive_time, width, label='Naive')
rects2 = ax.bar(x + width/2, kapra_time, width, label='Kapra')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Time (s)')
ax.set_xlabel('Number of instances')
ax.set_title('Time efficiency')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
#plt.show()
plt.savefig('stat.png')
```
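The expected layout of `tmp.txt` is not shown in the repository; the sketch below writes a hypothetical file consistent with the parsing above, where rows alternate naive/kapra, the second field is the instance-count label, and the third is the elapsed seconds. The numbers are made up for illustration.
```python
# Hypothetical tmp.txt sample consistent with the parser in draw_stat.py.
sample = """naive 1000 12.30
kapra 1000 4.51
naive 2000 25.12
kapra 2000 8.94
"""
with open('tmp.txt', 'w') as f:
    f.write(sample)
```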
|
{
"source": "jeorjebot/lithops",
"score": 2
}
|
#### File: lithops/lithops/wait.py
```python
import signal
import logging
import time
import concurrent.futures as cf
from functools import partial
from lithops.utils import is_unix_system, timeout_handler, \
is_notebook, is_lithops_worker, FuturesList
from lithops.storage import InternalStorage
from lithops.monitor import JobMonitor
from types import SimpleNamespace
from itertools import chain
ALL_COMPLETED = 1
ANY_COMPLETED = 2
ALWAYS = 3
THREADPOOL_SIZE = 64
WAIT_DUR_SEC = 1
logger = logging.getLogger(__name__)
def wait(fs, internal_storage=None, throw_except=True, timeout=None,
return_when=ALL_COMPLETED, download_results=False, job_monitor=None,
threadpool_size=THREADPOOL_SIZE, wait_dur_sec=WAIT_DUR_SEC):
"""
Wait for the Future instances (possibly created by different Executor instances)
    given by fs to complete. Returns a 2-tuple of lists. The first list, fs_done,
    contains the futures that completed (finished or cancelled) before the wait
    returned. The second list, fs_notdone, contains the futures that did not
    complete (still pending or running). timeout can be used to control the maximum
    number of seconds to wait before returning.
:param fs: Futures list. Default None
:param throw_except: Re-raise exception if call raised. Default True.
:param return_when: One of `ALL_COMPLETED`, `ANY_COMPLETED`, `ALWAYS`
:param download_results: Download results. Default false (Only get statuses)
:param timeout: Timeout of waiting for results.
    :param threadpool_size: Number of threads to use. Default 64
:param wait_dur_sec: Time interval between each check.
:return: `(fs_done, fs_notdone)`
where `fs_done` is a list of futures that have completed
and `fs_notdone` is a list of futures that have not completed.
:rtype: 2-tuple of list
"""
if not fs:
return
if type(fs) != list and type(fs) != FuturesList:
fs = [fs]
if download_results:
msg = 'ExecutorID {} - Getting results from functions'.format(fs[0].executor_id)
fs_done = [f for f in fs if f.done]
fs_not_done = [f for f in fs if not f.done]
else:
msg = 'ExecutorID {} - Waiting for functions to complete'.format(fs[0].executor_id)
fs_done = [f for f in fs if f.success or f.done]
fs_not_done = [f for f in fs if not (f.success or f.done)]
logger.info(msg)
if not fs_not_done:
return fs_done, fs_not_done
if is_unix_system() and timeout is not None:
logger.debug('Setting waiting timeout to {} seconds'.format(timeout))
error_msg = 'Timeout of {} seconds exceeded waiting for function activations to finish'.format(timeout)
signal.signal(signal.SIGALRM, partial(timeout_handler, error_msg))
signal.alarm(timeout)
# Setup progress bar
pbar = None
if not is_lithops_worker() and logger.getEffectiveLevel() == logging.INFO:
from tqdm.auto import tqdm
if not is_notebook():
print()
pbar = tqdm(bar_format=' {l_bar}{bar}| {n_fmt}/{total_fmt} ',
total=len(fs), disable=None)
pbar.update(len(fs_done))
try:
jobs = _create_jobs_from_futures(fs, internal_storage)
if not job_monitor:
job_monitor = JobMonitor(backend='storage')
[job_monitor.create(**job_data).start() for job_data in jobs]
sleep_sec = wait_dur_sec if job_monitor.backend == 'storage' else 0.3
if return_when == ALL_COMPLETED:
while not _all_done(fs, download_results):
for job_data in jobs:
new_data = _get_job_data(fs, job_data, pbar=pbar,
throw_except=throw_except,
download_results=download_results,
threadpool_size=threadpool_size)
time.sleep(0 if new_data else sleep_sec)
elif return_when == ANY_COMPLETED:
while not _any_done(fs, download_results):
for job_data in jobs:
new_data = _get_job_data(fs, job_data, pbar=pbar,
throw_except=throw_except,
download_results=download_results,
threadpool_size=threadpool_size)
time.sleep(0 if new_data else sleep_sec)
elif return_when == ALWAYS:
for job_data in jobs:
_get_job_data(fs, job_data, pbar=pbar,
throw_except=throw_except,
download_results=download_results,
threadpool_size=threadpool_size)
except KeyboardInterrupt as e:
if download_results:
not_dones_call_ids = [(f.job_id, f.call_id) for f in fs if not f.done]
else:
not_dones_call_ids = [(f.job_id, f.call_id) for f in fs if not f.success and not f.done]
msg = ('Cancelled - Total Activations not done: {}'.format(len(not_dones_call_ids)))
if pbar:
pbar.close()
print()
logger.info(msg)
raise e
except Exception as e:
raise e
finally:
if is_unix_system():
signal.alarm(0)
if pbar and not pbar.disable:
pbar.close()
if not is_notebook():
print()
if download_results:
fs_done = [f for f in fs if f.done]
fs_notdone = [f for f in fs if not f.done]
else:
fs_done = [f for f in fs if f.success or f.done]
fs_notdone = [f for f in fs if not f.success and not f.done]
return fs_done, fs_notdone
def get_result(fs, throw_except=True, timeout=None,
               threadpool_size=THREADPOOL_SIZE,
wait_dur_sec=WAIT_DUR_SEC,
internal_storage=None):
"""
For getting the results from all function activations
:param fs: Futures list. Default None
    :param throw_except: Re-raise exception if call raised. Default True.
    :param timeout: Timeout for waiting for results.
    :param threadpool_size: Number of threads to use. Default 64
    :param wait_dur_sec: Time interval between each check.
:return: The result of the future/s
"""
if type(fs) != list and type(fs) != FuturesList:
fs = [fs]
fs_done, _ = wait(fs=fs, throw_except=throw_except,
timeout=timeout, download_results=True,
internal_storage=internal_storage,
                      threadpool_size=threadpool_size,
wait_dur_sec=wait_dur_sec)
result = []
fs_done = [f for f in fs_done if not f.futures and f._produce_output]
for f in fs_done:
result.append(f.result(throw_except=throw_except))
logger.debug("ExecutorID {} - Finished getting results".format(fs[0].executor_id))
return result
def _create_jobs_from_futures(fs, internal_storage):
"""
Creates a dummy job necessary for the job monitor
"""
jobs = []
present_jobs = {f.job_key for f in fs}
for job_key in present_jobs:
job_data = {}
job = SimpleNamespace()
job.futures = [f for f in fs if f.job_key == job_key]
job.total_calls = len(job.futures)
f = job.futures[0]
job.executor_id = f.executor_id
job.job_id = f.job_id
job.job_key = f.job_key
job_data['job'] = job
if internal_storage and internal_storage.backend == f._storage_config['backend']:
job_data['internal_storage'] = internal_storage
else:
job_data['internal_storage'] = InternalStorage(f._storage_config)
jobs.append(job_data)
return jobs
def _all_done(fs, download_results):
"""
Checks if all futures are ready or done
"""
if download_results:
return all([f.done for f in fs])
else:
return all([f.success or f.done for f in fs])
def _any_done(fs, download_results):
"""
    Checks if any future is ready or done
"""
if download_results:
return any([f.done for f in fs])
else:
return any([f.success or f.done for f in fs])
def _get_job_data(fs, job_data, download_results, throw_except, threadpool_size, pbar):
"""
Downloads all status/results from ready futures
"""
job = job_data['job']
internal_storage = job_data['internal_storage']
if download_results:
callids_done = [(f.executor_id, f.job_id, f.call_id) for f in job.futures if (f.ready or f.success)]
not_done_futures = [f for f in job.futures if not f.done]
else:
callids_done = [(f.executor_id, f.job_id, f.call_id) for f in job.futures if f.ready]
not_done_futures = [f for f in job.futures if not (f.success or f.done)]
not_done_call_ids = set([(f.executor_id, f.job_id, f.call_id) for f in not_done_futures])
new_callids_done = not_done_call_ids.intersection(callids_done)
fs_to_wait_on = []
for f in job.futures:
if (f.executor_id, f.job_id, f.call_id) in new_callids_done:
fs_to_wait_on.append(f)
def get_result(f):
f.result(throw_except=throw_except, internal_storage=internal_storage)
def get_status(f):
f.status(throw_except=throw_except, internal_storage=internal_storage)
pool = cf.ThreadPoolExecutor(max_workers=threadpool_size)
if download_results:
list(pool.map(get_result, fs_to_wait_on))
else:
list(pool.map(get_status, fs_to_wait_on))
pool.shutdown()
if pbar:
for f in fs_to_wait_on:
if (download_results and f.done) or \
(not download_results and (f.success or f.done)):
pbar.update(1)
pbar.refresh()
# Check for new futures
new_futures = list(chain(*[f._new_futures for f in fs_to_wait_on if f._new_futures]))
if new_futures:
fs.extend(new_futures)
job.futures.extend(new_futures)
if pbar:
pbar.total = pbar.total + len(new_futures)
pbar.refresh()
return len(fs_to_wait_on)
```
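A minimal end-to-end sketch of how `wait` and `get_result` are typically driven (an assumption based on the public Lithops `FunctionExecutor` API, not on documentation shipped with this file); it requires a configured Lithops backend, and the worker function `double` is hypothetical.
```python
import lithops
from lithops.wait import wait, get_result, ANY_COMPLETED

def double(x):
    # Hypothetical worker used only for illustration
    return 2 * x

fexec = lithops.FunctionExecutor()                           # needs a configured backend
futures = fexec.map(double, range(4))                        # schedule four activations
done, not_done = wait(futures, return_when=ANY_COMPLETED)    # block until at least one finishes
results = get_result(futures)                                # wait for and download every result
print(results)                                               # expected: [0, 2, 4, 6]
```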
|
{
"source": "jeorme/tf-quant-finance",
"score": 2
}
|
#### File: experimental/local_volatility/local_volatility_model_test.py
```python
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
implied_vol = tff.black_scholes.implied_vol
LocalVolatilityModel = tff.experimental.local_volatility.LocalVolatilityModel
volatility_surface = tff.experimental.pricing_platform.framework.market_data.volatility_surface
# This function can't be moved to SetUp since that would break graph mode
# execution
def build_tensors(dim):
year = dim * [[2021, 2022]]
month = dim * [[1, 1]]
day = dim * [[1, 1]]
expiries = tff.datetime.dates_from_year_month_day(year, month, day)
valuation_date = [(2020, 1, 1)]
expiry_times = tff.datetime.daycount_actual_365_fixed(
start_date=valuation_date, end_date=expiries, dtype=tf.float64)
strikes = dim * [[[0.1, 0.9, 1.0, 1.1, 3], [0.1, 0.9, 1.0, 1.1, 3]]]
iv = dim * [[[0.135, 0.13, 0.1, 0.11, 0.13],
[0.135, 0.13, 0.1, 0.11, 0.13]]]
spot = dim * [1.0]
return valuation_date, expiries, expiry_times, strikes, iv, spot
def build_volatility_surface(val_date, expiry_times, expiries, strikes, iv,
dtype):
interpolator = tff.math.interpolation.interpolation_2d.Interpolation2D(
expiry_times, strikes, iv, dtype=dtype)
def _interpolator(t, x):
x_transposed = tf.transpose(x)
t = tf.broadcast_to(t, x_transposed.shape)
return tf.transpose(interpolator.interpolate(t, x_transposed))
return volatility_surface.VolatilitySurface(
val_date, expiries, strikes, iv, interpolator=_interpolator, dtype=dtype)
# @test_util.run_all_in_graph_and_eager_modes
class LocalVolatilityTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(LocalVolatilityTest, self).setUp()
def _get_implied_vol_from_simulations(time, strike, paths, spot, r, dtype):
r = tf.convert_to_tensor(r, dtype=dtype)
discount_factor = tf.math.exp(-r * time)
paths = tf.boolean_mask(
paths, tf.math.logical_not(tf.math.is_nan(paths)))
option_value = tf.math.reduce_mean(tf.nn.relu(paths - strike))
iv = implied_vol(
prices=discount_factor * option_value,
strikes=strike,
expiries=time,
spots=spot,
discount_factors=discount_factor,
dtype=dtype)
return iv
self._get_implied_vol = _get_implied_vol_from_simulations
@parameterized.named_parameters(
('1d', 1, [0.0], 0.1, True),
('2d', 2, [0.0], 0.1, True),
('3d', 3, [0.0], 0.1, True),
('1d_nonzero_riskfree_rate', 1, [0.05], 0.05, True),
('1d_using_vol_surface', 1, [0.0], 0.1, False),
)
def test_lv_correctness(self, dim, risk_free_rate, time_step,
using_market_data):
"""Tests that the model reproduces implied volatility smile."""
dtype = tf.float64
num_samples = 10000
val_date, expiries, expiry_times, strikes, iv, spot = build_tensors(dim)
if using_market_data:
lv = LocalVolatilityModel.from_market_data(
dim, val_date, expiries, strikes, iv, spot, risk_free_rate, [0.0],
dtype=dtype)
else:
vs = build_volatility_surface(
val_date, expiry_times, expiries, strikes, iv, dtype=dtype)
lv = LocalVolatilityModel.from_volatility_surface(
dim, spot, vs, risk_free_rate, [0.0], dtype)
paths = lv.sample_paths(
[1.0, 2.0],
num_samples=num_samples,
initial_state=spot,
time_step=time_step,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
seed=[1, 2])
for d in range(dim):
for i in range(2):
for j in [1, 2, 3]:
sim_iv = self.evaluate(
self._get_implied_vol(expiry_times[d][i], strikes[d][i][j],
paths[:, i, d], spot[d], risk_free_rate,
dtype))
self.assertAllClose(sim_iv[0], iv[d][i][j], atol=0.005, rtol=0.005)
if __name__ == '__main__':
tf.test.main()
```
#### File: equity_instruments/american_option/american_option_impl.py
```python
from typing import Any, Optional, List, Dict, Union, Tuple
import dataclasses
import tensorflow.compat.v2 as tf
from tf_quant_finance import datetime as dateslib
from tf_quant_finance.experimental.pricing_platform.framework.core import curve_types as curve_types_lib
from tf_quant_finance.experimental.pricing_platform.framework.core import instrument
from tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd
from tf_quant_finance.experimental.pricing_platform.framework.core import types
from tf_quant_finance.experimental.pricing_platform.framework.equity_instruments import utils as equity_utils
from tf_quant_finance.experimental.pricing_platform.framework.equity_instruments.american_option import proto_utils
from tf_quant_finance.experimental.pricing_platform.framework.equity_instruments.american_option import utils
from tf_quant_finance.experimental.pricing_platform.framework.market_data import utils as market_data_utils
from tf_quant_finance.experimental.pricing_platform.framework.rate_instruments import cashflow_streams
from tf_quant_finance.experimental.pricing_platform.instrument_protos import american_equity_option_pb2 as american_option_pb2
@dataclasses.dataclass(frozen=True)
class AmericanOptionConfig:
discounting_curve: Optional[
Dict[types.CurrencyProtoType,
curve_types_lib.CurveType]] = dataclasses.field(default_factory=dict)
model: str = "BS-LSM" # default pricing model is LSM under Black-Scholes
num_samples: int = 96000
num_calibration_samples: int = None
num_exercise_times: int = 100
seed: types.IntTensor = (42, 42) # Should be an integer `Tensor` of shape [2]
class AmericanOption(instrument.Instrument):
"""Represents a batch of American Equity Options.
  An American equity option is a contract that gives the holder the right to
  buy (call) or sell (put) an equity for a predefined value (the strike) at
  any date before expiry.
The AmericanOption class can be used to create and price multiple options
simultaneously.
#### Example:
The following example illustrates the construction of a batch of American
options and pricing them.
```None
american_option_proto = american_option_pb2.AmericanEquityOption(
short_position=True,
expiry_date=date_pb2.Date(year=2022, month=5, day=21),
contract_amount=decimal_pb2.Decimal(units=10000),
strike=decimal_pb2.Decimal(units=1500),
equity="GOOG",
currency=Currency.USD(),
business_day_convention=BusinessDayConvention.MODIFIED_FOLLOWING(),
is_call_option=False)
valuation_date = [(2020, 2, 8)]
market = market_data.MarketDataDict(valuation_date, ...)
  am_option_portfolio = AmericanOption.from_protos(
[american_option_proto])
am_option_portfolio[0].price(market)
```
"""
def __init__(self,
short_position: types.BoolTensor,
currency: types.CurrencyProtoType,
expiry_date: types.DateTensor,
equity: List[str],
contract_amount: types.FloatTensor,
strike: types.FloatTensor,
is_call_option: List[bool],
business_day_convention: types.BusinessDayConventionProtoType,
calendar: types.BankHolidaysProtoType,
settlement_days: Optional[types.IntTensor] = 0,
discount_curve_type: curve_types_lib.CurveType = None,
discount_curve_mask: types.IntTensor = None,
equity_mask: types.IntTensor = None,
config: Union[AmericanOptionConfig, Dict[str, Any]] = None,
batch_names: Optional[types.StringTensor] = None,
dtype: Optional[types.Dtype] = None,
name: Optional[str] = None):
"""Initializes the batch of American Equity Options.
Args:
short_position: Whether the price is computed for the contract holder.
Default value: `True` which means that the price is for the contract
holder.
currency: The denominated currency.
expiry_date: A `DateTensor` specifying the dates on which the options
expire.
equity: A string name of the underlyings.
      contract_amount: A `Tensor` of real dtype and shape compatible with
        `short_position`.
      strike: A `Tensor` of real dtype and shape compatible with
        `short_position`. Option strikes.
      is_call_option: A bool `Tensor` of shape compatible with
        `short_position`. Indicates which options are of call type.
business_day_convention: A business count convention.
calendar: A calendar to specify the weekend mask and bank holidays.
      settlement_days: An integer `Tensor` of a shape broadcastable with the
        shape of `expiry_date`.
discount_curve_type: An optional instance of `CurveType` or a list of
those. If supplied as a list and `discount_curve_mask` is not supplied,
the size of the list should be the same as the number of priced
instruments. Defines discount curves for the instruments.
Default value: `None`, meaning that discount curves are inferred
from `currency` and `config`.
discount_curve_mask: An optional integer `Tensor` of values ranging from
`0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.
Identifies a mapping between `discount_curve_type` list and the
underlying instruments.
Default value: `None`.
equity_mask: An optional integer `Tensor` of values ranging from
`0` to `len(equity) - 1` and of shape `batch_shape`. Identifies
a mapping between `equity` list and the underlying instruments.
Default value: `None`.
config: Optional `AmericanOptionConfig` or a dictionary. If dictionary,
then the keys should be the same as the field names of
`AmericanOptionConfig`.
batch_names: A string `Tensor` of instrument names. Should be of shape
        `batch_shape + [2]` specifying the name and instrument type. This is useful
when the `from_protos` method is used and the user needs to identify
which instruments got batched together.
dtype: `tf.Dtype` of the input and output real `Tensor`s.
Default value: `None` which maps to `float64`.
name: Python str. The name to give to the ops created by this class.
Default value: `None` which maps to 'AmericanOption'.
"""
self._name = name or "AmericanOption"
with tf.name_scope(self._name):
if batch_names is not None:
self._names = tf.convert_to_tensor(batch_names,
name="batch_names")
else:
self._names = None
self._dtype = dtype or tf.float64
ones = tf.constant(1, dtype=self._dtype)
self._short_position = tf.where(
short_position, ones, -ones, name="short_position")
self._contract_amount = tf.convert_to_tensor(
contract_amount, dtype=self._dtype, name="contract_amount")
self._strike = tf.convert_to_tensor(strike, dtype=self._dtype,
name="strike")
self._is_call_option = tf.convert_to_tensor(
          is_call_option, dtype=tf.bool, name="is_call_option")
settlement_days = tf.convert_to_tensor(settlement_days)
# Business day roll convention and the end of month flag
roll_convention, eom = market_data_utils.get_business_day_convention(
business_day_convention)
# TODO(b/160446193): Calendar is ignored at the moment
calendar = dateslib.create_holiday_calendar(
weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)
if isinstance(expiry_date, types.IntTensor):
self._expiry_date = dateslib.dates_from_tensor(expiry_date)
else:
self._expiry_date = dateslib.convert_to_date_tensor(expiry_date)
self._settlement_days = settlement_days
self._roll_convention = roll_convention
# Get discount and reference curves
self._currency = cashflow_streams.to_list(currency)
self._equity = cashflow_streams.to_list(equity)
if len(self._currency) != len(self._equity):
if len(self._currency) > 1 and len(self._equity) > 1:
raise ValueError(
"Number of currencies and equities should be the same "
"but it is {0} and {1}".format(len(self._currency),
len(self._equity)))
config = _process_config(config)
[
self._model,
self._num_samples,
self._seed,
self._num_exercise_times,
self._num_calibration_samples
] = _get_config_values(config)
if discount_curve_type is None:
discount_curve_type = []
for currency in self._currency:
if currency in config.discounting_curve:
curve_type = config.discounting_curve[currency]
else:
# Default discounting curve
curve_type = curve_types_lib.RiskFreeCurve(
currency=currency)
discount_curve_type.append(curve_type)
# Get masks for discount curves and vol surfaces
[
self._discount_curve_type,
self._discount_curve_mask
] = cashflow_streams.process_curve_types(discount_curve_type,
discount_curve_mask)
[
self._equity,
self._equity_mask,
] = equity_utils.process_equities(self._equity, equity_mask)
# Get batch shape
self._batch_shape = tf.shape(strike)
@classmethod
def create_constructor_args(
cls, proto_list: List[american_option_pb2.AmericanEquityOption],
config: AmericanOptionConfig = None) -> Dict[str, Any]:
"""Creates a dictionary to initialize AmericanEquityOption.
The output dictionary is such that the instruments can be initialized
as follows:
```
initializer = create_constructor_args(proto_list, config)
american_options = [AmericanEquityOption(**data)
for data in initializer.values()]
```
The keys of the output dictionary are unique identifiers of the batched
instruments. This is useful for identifying an existing graph that could be
reused for the instruments without the need of rebuilding the graph.
Args:
proto_list: A list of protos for which the initialization arguments are
constructed.
config: An instance of `AmericanOptionConfig`.
Returns:
A possibly nested dictionary such that each value provides initialization
arguments for the AmericanEquityOption.
"""
am_option_data = proto_utils.from_protos(proto_list, config)
res = {}
for key in am_option_data:
tensor_repr = proto_utils.tensor_repr(am_option_data[key])
res[key] = tensor_repr
return res
@classmethod
def from_protos(
cls,
proto_list: List[american_option_pb2.AmericanEquityOption],
config: AmericanOptionConfig = None
) -> List["AmericanOption"]:
proto_dict = proto_utils.from_protos(proto_list, config)
    instruments = []
    for kwargs in proto_dict.values():
      # Create an instrument from the batched keyword arguments
      instruments.append(cls(**kwargs))
    return instruments
@classmethod
def group_protos(
cls,
proto_list: List[american_option_pb2.AmericanEquityOption],
config: AmericanOptionConfig = None
) -> Dict[str, List["AmericanOption"]]:
return proto_utils.group_protos(proto_list, config)
def price(self,
market: pmd.ProcessedMarketData,
name: Optional[str] = None) -> types.FloatTensor:
"""Returns the present value of the American options.
Args:
market: An instance of `ProcessedMarketData`.
name: Python str. The name to give to the ops created by this function.
Default value: `None` which maps to 'price'.
Returns:
A `Tensor` of shape `batch_shape` containing the modeled price of each
American option contract based on the input market data.
"""
name = name or (self._name + "_price")
with tf.name_scope(name):
discount_curve = cashflow_streams.get_discount_curve(
self._discount_curve_type, market, self._discount_curve_mask)
vol_surface = equity_utils.get_vol_surface(
self._equity, market, self._equity_mask)
spots = tf.stack(market.spot(self._equity), axis=0)
discount_factors = discount_curve.discount_factor(
self._expiry_date.expand_dims(axis=-1))
daycount_convention = discount_curve.daycount_convention
day_count_fn = market_data_utils.get_daycount_fn(daycount_convention)
if spots.shape.rank > 0:
spots = tf.gather(spots, self._equity_mask)
if self._model == "BS-LSM":
# TODO(b/168798725): volatility should be time-dependent
vols = vol_surface.volatility(
expiry_dates=self._expiry_date.expand_dims(axis=-1),
strike=tf.expand_dims(self._strike, axis=-1))
prices = utils.bs_lsm_price(
spots=spots,
expiry_times=day_count_fn(
start_date=market.date,
end_date=self._expiry_date,
dtype=self._dtype),
strikes=self._strike,
volatility=tf.squeeze(vols, axis=-1),
discount_factors=tf.squeeze(discount_factors),
is_call_option=self._is_call_option,
num_samples=self._num_samples,
num_exercise_times=self._num_exercise_times,
num_calibration_samples=self._num_calibration_samples,
seed=self._seed)
return self._short_position * self._contract_amount * prices
else:
raise ValueError("Only BS-LSM model is suppoted. "
"Supplied {}".format(self._model))
@property
def batch_shape(self) -> tf.Tensor:
return self._batch_shape
@property
def names(self) -> tf.Tensor:
"""Returns a string tensor of names and instrument types.
The shape of the output is [batch_shape, 2].
"""
return self._names
def ir_delta(self,
tenor: types.DateTensor,
processed_market_data: pmd.ProcessedMarketData,
curve_type: Optional[curve_types_lib.CurveType] = None,
shock_size: Optional[float] = None) -> tf.Tensor:
"""Computes delta wrt to the tenor perturbation."""
raise NotImplementedError("Coming soon.")
def ir_delta_parallel(
self,
processed_market_data: pmd.ProcessedMarketData,
curve_type: Optional[curve_types_lib.CurveType] = None,
shock_size: Optional[float] = None) -> tf.Tensor:
"""Computes delta wrt to the curve parallel perturbation."""
raise NotImplementedError("Coming soon.")
def ir_vega(self,
tenor: types.DateTensor,
processed_market_data: pmd.ProcessedMarketData,
shock_size: Optional[float] = None) -> tf.Tensor:
"""Computes vega wrt to the tenor perturbation."""
raise NotImplementedError("Coming soon.")
def _process_config(
config: Union[AmericanOptionConfig, Dict[str, Any], None]
) -> AmericanOptionConfig:
"""Converts config to AmericanOptionConfig."""
if config is None:
return AmericanOptionConfig()
if isinstance(config, AmericanOptionConfig):
return config
model = config.get("model", "BS-LSM")
seed = config.get("seed", [42, 42])
num_exercise_times = config.get("num_exercise_times", 100)
num_samples = config.get("num_samples", 96000)
num_calibration_samples = config.get("num_calibration_samples", None)
discounting_curve = config.get("discounting_curve", dict())
return AmericanOptionConfig(discounting_curve=discounting_curve,
model=model,
seed=seed,
num_exercise_times=num_exercise_times,
num_samples=num_samples,
num_calibration_samples=num_calibration_samples)
def _get_config_values(
config: AmericanOptionConfig
) -> Tuple[str, int, types.IntTensor, int, int]:
"""Extracts config values."""
[
model,
num_samples,
seed,
num_exercise_times,
num_calibration_samples
] = [config.model,
config.num_samples,
tf.convert_to_tensor(config.seed, name="seed"),
config.num_exercise_times,
config.num_calibration_samples]
return model, num_samples, seed, num_exercise_times, num_calibration_samples
__all__ = ["AmericanOptionConfig", "AmericanOption"]
```
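A small sketch (not from the library docs) of the two equivalent ways to supply a pricing config, based on the `AmericanOptionConfig` dataclass and `_process_config` helper defined above; the numeric values are arbitrary examples.
```python
# Assuming the module above is imported; values below are arbitrary.
config_a = AmericanOptionConfig(model="BS-LSM", num_samples=50000,
                                num_exercise_times=50, seed=(1, 2))

# A plain dict works too; _process_config fills in the remaining defaults.
config_b = _process_config({"model": "BS-LSM", "num_samples": 50000, "seed": [1, 2]})
```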
#### File: framework/market_data/market_data_test.py
```python
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
core = tff.experimental.pricing_platform.framework.core
market_data = tff.experimental.pricing_platform.framework.market_data
interpolation_method = tff.experimental.pricing_platform.framework.core.interpolation_method
@test_util.run_all_in_graph_and_eager_modes
class MarketDataTest(tf.test.TestCase):
def setUp(self):
dates = [[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8],
[2027, 2, 8], [2030, 2, 8], [2050, 2, 8]]
discounts = [0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.8013675,
0.72494879, 0.37602059]
vol_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2027, 2, 8]]
strikes = [[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510]]
volatilities = [[0.1, 0.12, 0.13],
[0.15, 0.2, 0.15],
[0.1, 0.2, 0.1],
[0.1, 0.2, 0.1],
[0.1, 0.1, 0.3]]
libor_3m_config = market_data.config.RateConfig(
interpolation_method=interpolation_method.InterpolationMethod.LINEAR)
self._rate_config = {"USD": {"LIBOR_3M": libor_3m_config}}
risk_free_dates = [
[2021, 2, 8], [2022, 2, 8], [2023, 2, 8], [2025, 2, 8], [2050, 2, 8]]
risk_free_discounts = [
0.97197441, 0.94022746, 0.91074031, 0.85495089, 0.37602059]
self._market_data_dict = {
"USD": {
"risk_free_curve":
{"dates": risk_free_dates, "discounts": risk_free_discounts},
"OIS":
{"dates": dates, "discounts": discounts},
"LIBOR_3M":
{"dates": dates, "discounts": discounts},},
"GOOG": {
"spot": 1500,
"volatility_surface": {"dates": vol_dates,
"strikes": strikes,
"implied_volatilities": volatilities}
}}
self._valuation_date = [(2020, 6, 24)]
self._libor_discounts = discounts
self._risk_free_discounts = risk_free_discounts
super(MarketDataTest, self).setUp()
def test_discount_curve(self):
market = market_data.MarketDataDict(
self._valuation_date,
self._market_data_dict,
config=self._rate_config)
# Get the risk free discount curve
risk_free_curve_type = core.curve_types.RiskFreeCurve(currency="USD")
risk_free_curve = market.yield_curve(risk_free_curve_type)
# Get LIBOR 3M discount
libor_3m = core.rate_indices.RateIndex(type="LIBOR_3M")
rate_index_curve_type = core.curve_types.RateIndexCurve(
currency="USD", index=libor_3m)
libor_3m_curve = market.yield_curve(rate_index_curve_type)
with self.subTest("RiskFree"):
discount_factor_nodes = risk_free_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._risk_free_discounts)
with self.subTest("LIBOR_3M"):
discount_factor_nodes = libor_3m_curve.discount_factor_nodes
self.assertAllClose(discount_factor_nodes, self._libor_discounts)
def test_volatility(self):
market = market_data.MarketDataDict(
self._valuation_date,
self._market_data_dict,
config=self._rate_config)
# Get volatility surface
vol_surface = market.volatility_surface(["GOOG", "GOOG"])
expiry = tff.datetime.dates_from_year_month_day(
year=[[2023], [2030]], month=[[5], [10]], day=[[10], [15]])
vols = vol_surface.volatility(expiry_dates=expiry, strike=[[1510], [1520]])
self.assertAllClose(
self.evaluate(vols), [[0.108], [0.31]], atol=1e-6)
if __name__ == "__main__":
tf.test.main()
```
#### File: models/hjm/zero_coupon_bond_option_test.py
```python
import numpy as np
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
# @test_util.run_all_in_graph_and_eager_modes
class HJMBondOptionTest(tf.test.TestCase):
def setUp(self):
self.mean_reversion_1d = [0.03]
self.volatility_1d = [0.02]
self.volatility_time_dep_1d = [0.01, 0.02]
self.mean_reversion_2d = [0.03, 0.06]
self.volatility_2d = [0.02, 0.01]
super(HJMBondOptionTest, self).setUp()
def test_correctness_1d(self):
"""Tests model with constant parameters in 1 dimension."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0])
maturities = np.array([5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=1,
mean_reversion=self.mean_reversion_1d,
volatility=self.volatility_1d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [1, 1])
price = self.evaluate(price)
self.assertAllClose(price, [[0.02817777]], rtol=error_tol, atol=error_tol)
def test_correctness_time_dep_1d(self):
"""Tests model with piecewise constant volatility in 1 dimension."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0])
maturities = np.array([5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
vol_piecewise_constant_fn = tff.math.piecewise.PiecewiseConstantFunc(
jump_locations=[0.5], values=self.volatility_time_dep_1d, dtype=dtype)
def piecewise_1d_volatility_fn(t, r_t):
vol = vol_piecewise_constant_fn([t])
return tf.fill(dims=[r_t.shape[0], 1], value=vol)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=1,
mean_reversion=self.mean_reversion_1d,
volatility=piecewise_1d_volatility_fn,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.05,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [1, 1])
price = self.evaluate(price)
self.assertAllClose(price, [[0.02237839]], rtol=error_tol, atol=error_tol)
def test_1d_batch(self):
"""Tests model with 1d batch of options."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0, 1.0, 1.0])
maturities = np.array([5.0, 5.0, 5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=1,
mean_reversion=self.mean_reversion_1d,
volatility=self.volatility_1d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [3, 1])
price = self.evaluate(price)
self.assertAllClose(
price, [[0.02817777], [0.02817777], [0.02817777]],
rtol=error_tol,
atol=error_tol)
def test_2d_batch(self):
"""Tests model with 2d batch of options."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([[1.0, 1.0], [2.0, 2.0]])
maturities = np.array([[5.0, 5.0], [4.0, 4.0]])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=1,
mean_reversion=self.mean_reversion_1d,
volatility=self.volatility_1d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [2, 2, 1])
price = self.evaluate(price)
expected = [[[0.02817777], [0.02817777]], [[0.02042677], [0.02042677]]]
self.assertAllClose(price, expected, rtol=error_tol, atol=error_tol)
def test_correctness_2_factor(self):
"""Tests model with constant parameters with 2 factors."""
dtype = tf.float64
error_tol = 1e-3
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0])
maturities = np.array([5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=2,
mean_reversion=self.mean_reversion_2d,
volatility=self.volatility_2d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [1, 1])
price = self.evaluate(price)
self.assertAllClose(price, [[0.03111126]], rtol=error_tol, atol=error_tol)
def test_correctness_2_factor_with_correlation(self):
"""Tests model with constant parameters with 2 correlated factors."""
dtype = tf.float64
error_tol = 1e-3
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0])
maturities = np.array([5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=2,
mean_reversion=self.mean_reversion_2d,
volatility=self.volatility_2d,
discount_rate_fn=discount_rate_fn,
corr_matrix=[[1.0, 0.5], [0.5, 1.0]],
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [1, 1])
price = self.evaluate(price)
self.assertAllClose(price, [[0.036809]], rtol=error_tol, atol=error_tol)
def test_correctness_2_factor_hull_white_consistency(self):
"""Test that under certain conditions HJM matches analytic HW results.
    For the two factor model, when both mean reversions are equal, the
    HJM model matches a HW one-factor model with the same mean reversion
    and effective volatility:
    eff_vol = sqrt(vol1^2 + vol2^2 + 2 * rho * vol1 * vol2)
where rho is the cross correlation between the two factors.
"""
dtype = tf.float64
error_tol = 1e-3
mu = 0.03
rho = 0.5
vol1 = 0.02
vol2 = 0.01
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0])
maturities = np.array([5.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
hjm_price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=2,
mean_reversion=[mu, mu],
volatility=[vol1, vol2],
discount_rate_fn=discount_rate_fn,
corr_matrix=[[1.0, rho], [rho, 1.0]],
num_samples=100000,
time_step=0.05,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
hjm_price = self.evaluate(hjm_price)
hw_price = tff.models.hull_white.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=1,
mean_reversion=[mu],
volatility=[np.sqrt(vol1**2 + vol2**2 + 2.0 * rho * vol1 * vol2)],
discount_rate_fn=discount_rate_fn,
use_analytic_pricing=True,
dtype=dtype)
hw_price = self.evaluate(hw_price)
self.assertAllClose(hjm_price, hw_price, rtol=error_tol, atol=error_tol)
def test_mixed_1d_batch_2_factor(self):
"""Tests mixed 1d batch with constant parameters with 2 factors."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0, 1.0, 2.0])
maturities = np.array([5.0, 6.0, 4.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries)
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
dim=2,
mean_reversion=self.mean_reversion_2d,
volatility=self.volatility_2d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [3, 1])
price = self.evaluate(price)
expected = [[0.03115176], [0.03789011], [0.02266191]]
self.assertAllClose(price, expected, rtol=error_tol, atol=error_tol)
def test_call_put(self):
"""Tests call and put pricing."""
dtype = tf.float64
error_tol = 1e-2
discount_rate_fn = lambda x: 0.01 * tf.ones_like(x, dtype=dtype)
expiries = np.array([1.0, 1.0, 2.0])
maturities = np.array([5.0, 6.0, 4.0])
strikes = np.exp(-0.01 * maturities) / np.exp(-0.01 * expiries) - 0.01
price = tff.models.hjm.bond_option_price(
strikes=strikes,
expiries=expiries,
maturities=maturities,
is_call_options=[True, False, False],
dim=2,
mean_reversion=self.mean_reversion_2d,
volatility=self.volatility_2d,
discount_rate_fn=discount_rate_fn,
num_samples=500000,
time_step=0.1,
random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,
dtype=dtype,
seed=[1, 2])
self.assertEqual(price.dtype, dtype)
self.assertAllEqual(price.shape, [3, 1])
price = self.evaluate(price)
expected = [[0.03620415], [0.03279728], [0.01784987]]
self.assertAllClose(price, expected, rtol=error_tol, atol=error_tol)
if __name__ == '__main__':
tf.test.main()
```
#### File: models/hjm/zero_coupon_bond_option_util.py
```python
import tensorflow.compat.v2 as tf
from tf_quant_finance.models import utils
def options_price_from_samples(strikes,
expiries,
maturities,
is_call_options,
sample_discount_curve_paths_fn,
num_samples,
time_step,
dtype=None):
"""Computes the zero coupon bond options price from simulated discount curves.
Args:
strikes: A real `Tensor` of any shape and dtype. The strike price of the
options. The shape of this input determines the number (and shape) of the
options to be priced and the output.
expiries: A real `Tensor` of the same dtype and compatible shape as
`strikes`. The time to expiry of each bond option.
maturities: A real `Tensor` of the same dtype and compatible shape as
`strikes`. The time to maturity of the underlying zero coupon bonds.
is_call_options: A boolean `Tensor` of a shape compatible with `strikes`.
Indicates whether the option is a call (if True) or a put (if False).
sample_discount_curve_paths_fn: Callable which takes the following args:
1) times: Rank 1 `Tensor` of positive real values, specifying the times at
which the path points are to be evaluated.
2) curve_times: Rank 1 `Tensor` of positive real values, specifying the
maturities at which the discount curve is to be computed at each
simulation time.
3) num_samples: Positive scalar integer specifying the number of paths to
draw.
and returns two `Tensor`s, the first being a Rank-4 tensor of shape
[num_samples, m, k, d] containing the simulated zero coupon bond curves,
and the second being a `Tensor` of shape [num_samples, k, d] containing
the simulated short rate paths. Here, m is the size of `curve_times`, k
is the size of `times`, and d is the dimensionality of the paths.
num_samples: Positive scalar `int32` `Tensor`. The number of simulation
paths during Monte-Carlo valuation.
time_step: Scalar real `Tensor`. Maximal distance between time grid points
in Euler scheme. Relevant when Euler scheme is used for simulation.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: `None` which means that default dtypes inferred by
TensorFlow are used.
Returns:
A `Tensor` of real dtype and shape `strikes.shape + [dim]` containing the
computed option prices.
"""
sim_times, _ = tf.unique(tf.reshape(expiries, shape=[-1]))
longest_expiry = tf.reduce_max(sim_times)
sim_times, _ = tf.unique(
tf.concat(
[sim_times, tf.range(time_step, longest_expiry, time_step)], axis=0))
sim_times = tf.sort(sim_times, name='sort_sim_times')
tau = maturities - expiries
curve_times_builder, _ = tf.unique(tf.reshape(tau, shape=[-1]))
curve_times = tf.sort(curve_times_builder, name='sort_curve_times')
p_t_tau, r_t = sample_discount_curve_paths_fn(
times=sim_times, curve_times=curve_times, num_samples=num_samples)
dim = p_t_tau.shape[-1]
dt_builder = tf.concat(
axis=0,
values=[
tf.convert_to_tensor([0.0], dtype=dtype),
sim_times[1:] - sim_times[:-1]
])
dt = tf.expand_dims(tf.expand_dims(dt_builder, axis=-1), axis=0)
discount_factors_builder = tf.math.exp(-r_t * dt)
# Transpose before (and after) because we want the cumprod along axis=1
# and `matvec` operates on the last axis. The shape before and after would
# be `(num_samples, len(times), dim)`
discount_factors_builder = tf.transpose(
utils.cumprod_using_matvec(
tf.transpose(discount_factors_builder, [0, 2, 1])), [0, 2, 1])
# make discount factors the same shape as `p_t_tau`. This involves adding
  # an extra dimension (corresponding to `curve_times`).
discount_factors_builder = tf.expand_dims(discount_factors_builder, axis=1)
discount_factors_simulated = tf.repeat(
discount_factors_builder, p_t_tau.shape.as_list()[1], axis=1)
# `sim_times` and `curve_times` are sorted for simulation. We need to
# select the indices corresponding to our input.
sim_time_index = tf.searchsorted(sim_times, tf.reshape(expiries, [-1]))
curve_time_index = tf.searchsorted(curve_times, tf.reshape(tau, [-1]))
gather_index = _prepare_indices(
tf.range(0, num_samples), curve_time_index, sim_time_index,
tf.range(0, dim))
# The shape after `gather_nd` would be (num_samples*num_strikes*dim,)
payoff_discount_factors_builder = tf.gather_nd(discount_factors_simulated,
gather_index)
# Reshape to `[num_samples] + strikes.shape + [dim]`
payoff_discount_factors = tf.reshape(payoff_discount_factors_builder,
[num_samples] + strikes.shape + [dim])
payoff_bond_price_builder = tf.gather_nd(p_t_tau, gather_index)
payoff_bond_price = tf.reshape(payoff_bond_price_builder,
[num_samples] + strikes.shape + [dim])
is_call_options = tf.reshape(
tf.broadcast_to(is_call_options, strikes.shape),
[1] + strikes.shape + [1])
strikes = tf.reshape(strikes, [1] + strikes.shape + [1])
payoff = tf.where(is_call_options,
tf.math.maximum(payoff_bond_price - strikes, 0.0),
tf.math.maximum(strikes - payoff_bond_price, 0.0))
option_value = tf.math.reduce_mean(payoff_discount_factors * payoff, axis=0)
return option_value
def _prepare_indices(idx0, idx1, idx2, idx3):
"""Prepare indices to get relevant slice from discount curve simulations."""
len0 = idx0.shape.as_list()[0]
len1 = idx1.shape.as_list()[0]
len3 = idx3.shape.as_list()[0]
idx0 = tf.repeat(idx0, len1 * len3)
idx1 = tf.tile(tf.repeat(idx1, len3), [len0])
idx2 = tf.tile(tf.repeat(idx2, len3), [len0])
idx3 = tf.tile(idx3, [len0 * len1])
return tf.stack([idx0, idx1, idx2, idx3], axis=-1)
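# A small worked example of the gather-index construction above (hypothetical
# sizes): with idx0=[0, 1] (samples), idx1=[0] (curve time), idx2=[3] (sim
# time) and idx3=[0] (dim), _prepare_indices returns
# [[0, 0, 3, 0], [1, 0, 3, 0]], i.e. one 4-d index per (sample, strike, dim).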
```
#### File: tf_quant_finance/models/ito_process.py
```python
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ItoProcess(object):
"""Interface for specifying Ito processes.
  Interface for defining a stochastic process governed by the Ito SDE:
```None
dX_i = a_i(t, X) dt + Sum(S_{ij}(t, X) dW_j for 1 <= j <= n), 1 <= i <= n
```
The vector coefficient `a_i` is referred to as the drift of the process and
the matrix `S_{ij}` as the volatility of the process. For the process to be
well defined, these coefficients need to satisfy certain technical conditions
which may be found in Ref. [1]. The vector `dW_j` represents independent
Brownian increments.
For a simple and instructive example of the implementation of this interface,
see `models.GenericItoProcess`.
#### References
[1]: <NAME>. Stochastic Differential Equations: An Introduction with
Applications. Springer. 2010.
"""
@abc.abstractmethod
def name(self):
"""The name to give to ops created by this class."""
pass
@abc.abstractmethod
def dim(self):
"""The dimension of the process. A positive python integer."""
pass
@abc.abstractmethod
def dtype(self):
"""The data type of process realizations."""
pass
@abc.abstractmethod
def drift_fn(self):
"""Python callable calculating instantaneous drift.
The callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t, the second argument is the value of
Ito process X as a tensor of shape `batch_shape + [dim]`. The result is
    the value of the drift a(t, X). The return value of the callable is a real `Tensor`
of the same dtype as the input arguments and of shape `batch_shape + [dim]`.
"""
pass
@abc.abstractmethod
def volatility_fn(self):
"""Python callable calculating the instantaneous volatility matrix.
The callable should accept two real `Tensor` arguments of the same dtype.
The first argument is the scalar time t and the second argument is the value
of Ito process X as a tensor of shape `batch_shape + [dim]`. The result is
the instantaneous volatility matrix at time t and location X: S(t, X). The
return value of the callable is a real `Tensor` of the same dtype as the
input arguments and of shape `batch_shape + [dim, dim]`.
"""
pass
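  # A minimal sketch (not part of this interface's source) of the callables a
  # concrete subclass might return, assuming a 1-d geometric Brownian motion
  # with hypothetical constants `mu` and `sigma`:
  #
  #   def drift_fn(t, x):             # x: shape batch_shape + [1]
  #     return mu * x                 # shape batch_shape + [1]
  #
  #   def volatility_fn(t, x):
  #     return sigma * tf.expand_dims(x, axis=-1)   # batch_shape + [1, 1]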
@abc.abstractmethod
def sample_paths(self,
times,
num_samples=1,
initial_state=None,
random_type=None,
seed=None,
**kwargs):
"""Returns a sample of paths from the process.
Args:
times: Rank 1 `Tensor` of increasing positive real values. The times at
which the path points are to be evaluated.
num_samples: Positive scalar `int`. The number of paths to draw.
initial_state: `Tensor` of shape `[dim]`. The initial state of the
process.
Default value: None which maps to a zero initial state.
random_type: Enum value of `RandomType`. The type of (quasi)-random number
generator to use to generate the paths.
Default value: None which maps to the standard pseudo-random numbers.
seed: Seed for the random number generator. The seed is
only relevant if `random_type` is one of
`[STATELESS, PSEUDO, HALTON_RANDOMIZED, PSEUDO_ANTITHETIC,
STATELESS_ANTITHETIC]`. For `PSEUDO`, `PSEUDO_ANTITHETIC` and
        `HALTON_RANDOMIZED` the seed should be a Python integer. For
        `STATELESS` and `STATELESS_ANTITHETIC` the seed must be supplied as an
        integer `Tensor` of shape `[2]`.
Default value: `None` which means no seed is set.
**kwargs: Any other keyword args needed by an implementation.
Returns:
A real `Tensor` of shape [num_samples, k, n] where `k` is the size of the
`times`, `n` is the dimension of the process.
"""
pass
@abc.abstractmethod
def fd_solver_backward(self,
start_time,
end_time,
coord_grid,
values_grid,
discounting=None,
one_step_fn=None,
boundary_conditions=None,
start_step_count=0,
num_steps=None,
time_step=None,
values_transform_fn=None,
dtype=None,
**kwargs):
"""Returns a solver for Feynman-Kac PDE associated to the process.
This method applies a finite difference method to solve the final value
problem as it appears in the Feynman-Kac formula associated to this Ito
    process. The Feynman-Kac PDE is closely related to the backward Kolmogorov
equation associated to the stochastic process and allows for the inclusion
of a discounting function.
For more details of the Feynman-Kac theorem see [1]. The PDE solved by this
method is:
```None
V_t + Sum[mu_i(t, x) V_i, 1<=i<=n] +
(1/2) Sum[ D_{ij} V_{ij}, 1 <= i,j <= n] - r(t, x) V = 0
```
In the above, `V_t` is the derivative of `V` with respect to `t`,
`V_i` is the partial derivative with respect to `x_i` and `V_{ij}` the
(mixed) partial derivative with respect to `x_i` and `x_j`. `mu_i` is the
drift of this process and `D_{ij}` are the components of the diffusion
tensor:
```None
D_{ij}(t,x) = (Sigma(t,x) . Transpose[Sigma(t,x)])_{ij}
```
This method evolves a spatially discretized solution of the above PDE from
time `t0` to time `t1 < t0` (i.e. backwards in time).
The solution `V(t,x)` is assumed to be discretized on an `n`-dimensional
rectangular grid. A rectangular grid, G, in n-dimensions may be described
by specifying the coordinates of the points along each axis. For example,
a 2 x 4 grid in two dimensions can be specified by taking the cartesian
product of [1, 3] and [5, 6, 7, 8] to yield the grid points with
coordinates: `[(1, 5), (1, 6), (1, 7), (1, 8), (3, 5) ... (3, 8)]`.
This method allows batching of solutions. In this context, batching means
the ability to represent and evolve multiple independent functions `V`
(e.g. V1, V2 ...) simultaneously. A single discretized solution is specified
by stating its values at each grid point. This can be represented as a
`Tensor` of shape [d1, d2, ... dn] where di is the grid size along the `i`th
axis. A batch of such solutions is represented by a `Tensor` of shape:
[K, d1, d2, ... dn] where `K` is the batch size. This method only requires
that the input parameter `values_grid` be broadcastable with shape
[K, d1, ... dn].
The evolution of the solution from `t0` to `t1` is often done by
discretizing the differential equation to a difference equation along
the spatial and temporal axes. The temporal discretization is given by a
(sequence of) time steps [dt_1, dt_2, ... dt_k] such that the sum of the
time steps is equal to the total time step `t0 - t1`. If a uniform time
step is used, it may equivalently be specified by stating the number of
steps (n_steps) to take. This method provides both options via the
`time_step` and `num_steps` parameters. However, not all methods need
discretization along time direction (e.g. method of lines) so this argument
may not be applicable to some implementations.
The workhorse of this method is the `one_step_fn`. For the commonly used
methods, see functions in `math.pde.steppers` module.
The mapping between the arguments of this method and the above
equation are described in the Args section below.
For a simple instructive example of implementation of this method, see
`models.GenericItoProcess.fd_solver_backward`.
TODO(b/142309558): Complete documentation.
Args:
start_time: Real positive scalar `Tensor`. The start time of the grid.
Corresponds to time `t0` above.
end_time: Real scalar `Tensor` smaller than the `start_time` and greater
than zero. The time to step back to. Corresponds to time `t1` above.
coord_grid: List of `n` rank 1 real `Tensor`s. `n` is the dimension of the
domain. The i-th `Tensor` has shape, `[d_i]` where `d_i` is the size of
the grid along axis `i`. The coordinates of the grid points. Corresponds
to the spatial grid `G` above.
values_grid: Real `Tensor` containing the function values at time
`start_time` which have to be stepped back to time `end_time`. The shape
of the `Tensor` must broadcast with `[K, d_1, d_2, ..., d_n]`. The first
axis of size `K` is the values batch dimension and allows multiple
functions (with potentially different boundary/final conditions) to be
stepped back simultaneously.
discounting: Callable corresponding to `r(t,x)` above. If not supplied,
zero discounting is assumed.
one_step_fn: The transition kernel. A callable that consumes the following
arguments by keyword:
1. 'time': Current time
2. 'next_time': The next time to step to. For the backwards in time
evolution, this time will be smaller than the current time.
3. 'coord_grid': The coordinate grid.
4. 'values_grid': The values grid.
5. 'boundary_conditions': The boundary conditions.
6. 'quadratic_coeff': A callable returning the quadratic coefficients
of the PDE (i.e. `(1/2)D_{ij}(t, x)` above). The callable accepts
the time and coordinate grid as keyword arguments and returns a
`Tensor` with shape that broadcasts with `[dim, dim]`.
7. 'linear_coeff': A callable returning the linear coefficients of the
PDE (i.e. `mu_i(t, x)` above). Accepts time and coordinate grid as
keyword arguments and returns a `Tensor` with shape that broadcasts
with `[dim]`.
8. 'constant_coeff': A callable returning the coefficient of the
linear homogenous term (i.e. `r(t,x)` above). Same spec as above.
The `one_step_fn` callable returns a 2-tuple containing the next
coordinate grid, next values grid.
boundary_conditions: A list of size `dim` containing boundary conditions.
The i'th element of the list is a 2-tuple containing the lower and upper
        boundary condition for the boundary along the i'th axis.
start_step_count: Scalar integer `Tensor`. Initial value for the number of
time steps performed.
Default value: 0 (i.e. no previous steps performed).
num_steps: Positive int scalar `Tensor`. The number of time steps to take
when moving from `start_time` to `end_time`. Either this argument or the
`time_step` argument must be supplied (but not both). If num steps is
`k>=1`, uniform time steps of size `(t0 - t1)/k` are taken to evolve the
solution from `t0` to `t1`. Corresponds to the `n_steps` parameter
above.
time_step: The time step to take. Either this argument or the `num_steps`
argument must be supplied (but not both). The type of this argument may
be one of the following (in order of generality): (a) None in which case
`num_steps` must be supplied. (b) A positive real scalar `Tensor`. The
maximum time step to take. If the value of this argument is `dt`, then
the total number of steps taken is N = (t0 - t1) / dt rounded up to
the nearest integer. The first N-1 steps are of size dt and the last
step is of size `t0 - t1 - (N-1) * dt`. (c) A callable accepting the
current time and returning the size of the step to take. The input and
the output are real scalar `Tensor`s.
values_transform_fn: An optional callable applied to transform the
solution values at each time step. The callable is invoked after the
time step has been performed. The callable should accept the time of the
grid, the coordinate grid and the values grid and should return the
values grid. All input arguments to be passed by keyword.
dtype: The dtype to use.
**kwargs: Any other keyword args needed by an implementation.
Returns:
A tuple object containing at least the following attributes:
final_values_grid: A `Tensor` of same shape and dtype as `values_grid`.
Contains the final state of the values grid at time `end_time`.
final_coord_grid: A list of `Tensor`s of the same specification as
the input `coord_grid`. Final state of the coordinate grid at time
`end_time`.
step_count: The total step count (i.e. the sum of the `start_step_count`
and the number of steps performed in this call.).
final_time: The final time at which the evolution stopped. This value
is given by `max(min(end_time, start_time), 0)`.
"""
pass
def fd_solver_forward(self,
start_time,
end_time,
coord_grid,
values_grid,
one_step_fn=None,
boundary_conditions=None,
start_step_count=0,
num_steps=None,
time_step=None,
values_transform_fn=None,
dtype=None,
**kwargs):
r"""Returns a solver for the Fokker Plank equation of this process.
The Fokker Plank equation (also known as the Kolmogorov Forward equation)
associated to this Ito process is given by:
```None
V_t + Sum[(mu_i(t, x) V)_i, 1<=i<=n]
- (1/2) Sum[ (D_{ij} V)_{ij}, 1 <= i,j <= n] = 0
```
with the initial value condition $$V(0, x) = u(x)$$.
This method evolves a spatially discretized solution of the above PDE from
time `t0` to time `t1 > t0` (i.e. forwards in time).
The solution `V(t,x)` is assumed to be discretized on an `n`-dimensional
rectangular grid. A rectangular grid, G, in n-dimensions may be described
by specifying the coordinates of the points along each axis. For example,
a 2 x 4 grid in two dimensions can be specified by taking the cartesian
product of [1, 3] and [5, 6, 7, 8] to yield the grid points with
coordinates: `[(1, 5), (1, 6), (1, 7), (1, 8), (3, 5) ... (3, 8)]`.
Batching of solutions is supported. In this context, batching means
the ability to represent and evolve multiple independent functions `V`
(e.g. V1, V2 ...) simultaneously. A single discretized solution is specified
by stating its values at each grid point. This can be represented as a
`Tensor` of shape [d1, d2, ... dn] where di is the grid size along the `i`th
axis. A batch of such solutions is represented by a `Tensor` of shape:
[K, d1, d2, ... dn] where `K` is the batch size. This method only requires
that the input parameter `values_grid` be broadcastable with shape
[K, d1, ... dn].
The evolution of the solution from `t0` to `t1` is often done by
discretizing the differential equation to a difference equation along
the spatial and temporal axes. The temporal discretization is given by a
(sequence of) time steps [dt_1, dt_2, ... dt_k] such that the sum of the
time steps is equal to the total time step `t1 - t0`. If a uniform time
step is used, it may equivalently be specified by stating the number of
steps (n_steps) to take. This method provides both options via the
`time_step` and `num_steps` parameters. However, not all methods need
discretization along time direction (e.g. method of lines) so this argument
may not be applicable to some implementations.
The workhorse of this method is the `one_step_fn`. For the commonly used
methods, see functions in `math.pde.steppers` module.
The mapping between the arguments of this method and the above
equation are described in the Args section below.
For a simple instructive example of implementation of this method, see
`models.GenericItoProcess.fd_solver_forward`.
TODO(b/142309558): Complete documentation.
Args:
start_time: Real positive scalar `Tensor`. The start time of the grid.
Corresponds to time `t0` above.
      end_time: Real scalar `Tensor` greater than the `start_time`. The time to
        step forward to. Corresponds to time `t1` above.
coord_grid: List of `n` rank 1 real `Tensor`s. `n` is the dimension of the
domain. The i-th `Tensor` has shape, `[d_i]` where `d_i` is the size of
the grid along axis `i`. The coordinates of the grid points. Corresponds
to the spatial grid `G` above.
values_grid: Real `Tensor` containing the function values at time
        `start_time` which have to be stepped forward to time `end_time`. The shape
of the `Tensor` must broadcast with `[K, d_1, d_2, ..., d_n]`. The first
axis of size `K` is the values batch dimension and allows multiple
functions (with potentially different boundary/final conditions) to be
stepped back simultaneously.
one_step_fn: The transition kernel. A callable that consumes the following
arguments by keyword:
1. 'time': Current time
2. 'next_time': The next time to step to. For the backwards in time
evolution, this time will be smaller than the current time.
3. 'coord_grid': The coordinate grid.
4. 'values_grid': The values grid.
5. 'quadratic_coeff': A callable returning the quadratic coefficients
of the PDE (i.e. `(1/2)D_{ij}(t, x)` above). The callable accepts
the time and coordinate grid as keyword arguments and returns a
`Tensor` with shape that broadcasts with `[dim, dim]`.
6. 'linear_coeff': A callable returning the linear coefficients of the
PDE (i.e. `mu_i(t, x)` above). Accepts time and coordinate grid as
keyword arguments and returns a `Tensor` with shape that broadcasts
with `[dim]`.
7. 'constant_coeff': A callable returning the coefficient of the
linear homogenous term (i.e. `r(t,x)` above). Same spec as above.
The `one_step_fn` callable returns a 2-tuple containing the next
coordinate grid, next values grid.
boundary_conditions: A list of size `dim` containing boundary conditions.
The i'th element of the list is a 2-tuple containing the lower and upper
        boundary condition for the boundary along the i'th axis.
start_step_count: Scalar integer `Tensor`. Initial value for the number of
time steps performed.
Default value: 0 (i.e. no previous steps performed).
num_steps: Positive int scalar `Tensor`. The number of time steps to take
when moving from `start_time` to `end_time`. Either this argument or the
`time_step` argument must be supplied (but not both). If num steps is
        `k>=1`, uniform time steps of size `(t1 - t0)/k` are taken to evolve the
solution from `t0` to `t1`. Corresponds to the `n_steps` parameter
above.
time_step: The time step to take. Either this argument or the `num_steps`
argument must be supplied (but not both). The type of this argument may
be one of the following (in order of generality): (a) None in which case
`num_steps` must be supplied. (b) A positive real scalar `Tensor`. The
maximum time step to take. If the value of this argument is `dt`, then
the total number of steps taken is N = (t1 - t0) / dt rounded up to
the nearest integer. The first N-1 steps are of size dt and the last
step is of size `t1 - t0 - (N-1) * dt`. (c) A callable accepting the
current time and returning the size of the step to take. The input and
the output are real scalar `Tensor`s.
values_transform_fn: An optional callable applied to transform the
solution values at each time step. The callable is invoked after the
time step has been performed. The callable should accept the time of the
grid, the coordinate grid and the values grid and should return the
values grid. All input arguments to be passed by keyword.
dtype: The dtype to use.
**kwargs: Any other keyword args needed by an implementation.
Returns:
A tuple object containing at least the following attributes:
final_values_grid: A `Tensor` of same shape and dtype as `values_grid`.
Contains the final state of the values grid at time `end_time`.
final_coord_grid: A list of `Tensor`s of the same specification as
the input `coord_grid`. Final state of the coordinate grid at time
`end_time`.
step_count: The total step count (i.e. the sum of the `start_step_count`
and the number of steps performed in this call.).
final_time: The final time at which the evolution stopped. This value
is given by `max(min(end_time, start_time), 0)`.
"""
pass
```
|
{
"source": "jeorryb/30daysofcode",
"score": 4
}
|
#### File: 30daysofcode/01_Word_Value_P1/wordvalue.py
```python
from data import DICTIONARY, LETTER_SCORES
def load_words(dictfile=DICTIONARY):
"""Load dictionary into a list and return list"""
try:
with open(dictfile) as f:
websters = f.readlines()
websters = [x.strip() for x in websters]
return websters
except TypeError:
return dictfile
def calc_word_value(word, score=0):
"""Calculate the value of the word entered into function
using imported constant mapping LETTER_SCORES"""
for w in word:
if not w.isalpha():
continue
score += LETTER_SCORES[w.upper()]
return score
def max_word_value(wordlist=DICTIONARY):
"""Calculate the word with the max value, can receive a list
of words as arg, if none provided uses default DICTIONARY"""
wordscores = []
websters = load_words(dictfile=wordlist)
for word in websters:
score = calc_word_value(word)
entry = (word, score)
wordscores.append(entry)
wordscores = sorted(wordscores, key=lambda x: x[1], reverse=True)
return wordscores[0][0]
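# A minimal usage sketch (example words are hypothetical; assumes LETTER_SCORES
# maps letters to the standard Scrabble values, e.g. H=4, E=1, L=1, O=1):
#   calc_word_value('hello')          # -> 8
#   max_word_value(['hi', 'hello'])   # -> 'hello'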
if __name__ == "__main__":
pass # run unittests to validate
```
#### File: 30daysofcode/02_Word_Value_P2/game_help.py
```python
import itertools
import random
from data import DICTIONARY, LETTER_SCORES, POUCH
NUM_LETTERS = 7
def draw_letters():
"""Pick NUM_LETTERS letters randomly. Hint: use stdlib random"""
draw = random.sample(POUCH, NUM_LETTERS)
return draw
def input_word(draw):
"""Ask player for a word and validate against draw.
Use _validation(word, draw) helper."""
word = input("Enter your word: ")
if _validation(word, draw):
return word
def _validation(word, draw):
"""Validations: 1) only use letters of draw, 2) valid dictionary word"""
    for w in word.upper():
        if w not in draw:
            print(f'Letter {w} not in draw')
            return False
        elif draw.count(w) < word.upper().count(w):
            print(f'Letter {w} used too many times')
            return False
    return True
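# For reference (hypothetical draw), with draw = ['A', 'B', 'B', 'C', 'D', 'E', 'F']:
#   _validation('bad', draw)   # -> True  (all letters available)
#   _validation('abba', draw)  # -> False ('A' drawn once but used twice)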
# From challenge 01:
def calc_word_value(word):
"""Calc a given word value based on Scrabble LETTER_SCORES mapping"""
return sum(LETTER_SCORES.get(char.upper(), 0) for char in word)
# Below 2 functions pass through the same 'draw' argument (smell?).
# Maybe you want to abstract this into a class?
# get_possible_dict_words and _get_permutations_draw would be instance methods.
# 'draw' would be set in the class constructor (__init__).
def get_possible_dict_words(draw):
"""Get all possible words from draw which are valid dictionary words.
Use the _get_permutations_draw helper and DICTIONARY constant"""
words = []
permuts = _get_permutations_draw(draw)
for p in permuts:
if p.lower() in DICTIONARY:
words.append(p)
return words
def _get_permutations_draw(draw):
"""Helper for get_possible_dict_words to get all permutations of draw letters.
Hint: use itertools.permutations"""
    permuts = [''.join(p)
               for n in range(1, NUM_LETTERS + 1)
               for p in itertools.permutations(draw, n)]
return permuts
# From challenge 01:
def max_word_value(words):
"""Calc the max value of a collection of words"""
return max(words, key=calc_word_value)
def main():
"""Main game interface calling the previously defined methods"""
draw = draw_letters()
print('Letters drawn: {}'.format(', '.join(draw)))
word = input_word(draw)
word_score = calc_word_value(word)
print('Word chosen: {} (value: {})'.format(word, word_score))
possible_words = get_possible_dict_words(draw)
max_word = max_word_value(possible_words)
max_word_score = calc_word_value(max_word)
print('Optimal word possible: {} (value: {})'.format(
max_word, max_word_score))
game_score = word_score / max_word_score * 100
print('You scored: {:.1f}'.format(game_score))
if __name__ == "__main__":
main()
```
#### File: 30daysofcode/03_Twitter_Analysis_P1/usertweets-help.py
```python
from collections import namedtuple
import csv
import os
import tweepy
from config import CONSUMER_KEY, CONSUMER_SECRET
from config import ACCESS_TOKEN, ACCESS_SECRET
DEST_DIR = 'data'
EXT = 'csv'
NUM_TWEETS = 100
Tweet = namedtuple('Tweet', 'id_str created_at text')
class UserTweets(object):
def __init__(self, handle, max_id=None):
"""Get handle and optional max_id.
Use tweepy.OAuthHandler, set_access_token and tweepy.API
to create api interface.
Use _get_tweets() helper to get a list of tweets.
Save the tweets as data/<handle>.csv"""
# ...
self._tweets = list(self._get_tweets())
self._save_tweets()
def _get_tweets(self):
"""Hint: use the user_timeline() method on the api you defined in init.
See tweepy API reference: http://docs.tweepy.org/en/v3.5.0/api.html
Use a list comprehension / generator to filter out fields
id_str created_at text (optionally use namedtuple)"""
pass
def _save_tweets(self):
"""Use the csv module (csv.writer) to write out the tweets.
If you use a namedtuple get the column names with Tweet._fields.
Otherwise define them as: id_str created_at text
You can use writerow for the header, writerows for the rows"""
pass
def __len__(self):
"""See http://pybit.es/python-data-model.html"""
pass
def __getitem__(self, pos):
"""See http://pybit.es/python-data-model.html"""
pass
if __name__ == "__main__":
for handle in ('pybites', 'techmoneykids', 'bbelderbos'):
print('--- {} ---'.format(handle))
user = UserTweets(handle)
for tw in user[:5]:
print(tw)
print()
```
|
{
"source": "jeorryb/datarambler",
"score": 3
}
|
#### File: plugins/interlinks/interlinks.py
```python
# -*- coding: utf-8 -*-
"""
Interlinks
=========================
This plugin allows you to include "interwiki" or shortcut links in the blog, as keyword>rest_of_url
"""
from bs4 import BeautifulSoup
from bs4 import SoupStrainer
from pelican import signals
import re
interlinks = {}
def getSettings (generator):
global interlinks
interlinks = {'this': generator.settings['SITEURL']+"/"}
if 'INTERLINKS' in generator.settings:
for key, value in generator.settings['INTERLINKS'].items():
interlinks[key] = value
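# Example with a hypothetical setting: if pelicanconf.py defines
#   INTERLINKS = {'wiki': 'https://en.wikipedia.org/wiki/'}
# then parse_links below rewrites <a href="wiki>Python"> to
# <a href="https://en.wikipedia.org/wiki/Python">.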
def parse_links(instance):
if instance._content is not None:
content = instance._content
if '<a' in content:
text = BeautifulSoup(content, "html.parser", parse_only=SoupStrainer("a"))
for link in text.find_all("a",href=re.compile("(.+?)>")):
old_tag = str(link)
url = link.get('href')
m = re.search(r"(.+?)>", url).groups()
name = m[0]
if name in interlinks:
hi = url.replace(name + ">", interlinks[name])
link['href'] = hi
content = content.replace(old_tag, str(link))
if '<img' in content:
text = BeautifulSoup(content, "html.parser", parse_only=SoupStrainer("img"))
for img in text.find_all('img', src=re.compile("(.+?)>")):
old_tag = str(img)
url = img.get('src')
m = re.search(r"(.+?)>", url).groups()
name = m[0]
if name in interlinks:
hi = url.replace(name+">",interlinks[name])
img['src'] = hi
                content = content.replace(old_tag, str(img))
instance._content = content
def register():
signals.generator_init.connect(getSettings)
signals.content_object_init.connect(parse_links)
```
#### File: plugins/render_math/pelican_mathjax_markdown_extension.py
```python
import markdown
from markdown.util import etree
from markdown.util import AtomicString
class PelicanMathJaxPattern(markdown.inlinepatterns.Pattern):
"""Inline markdown processing that matches mathjax"""
def __init__(self, pelican_mathjax_extension, tag, pattern):
super(PelicanMathJaxPattern,self).__init__(pattern)
self.math_tag_class = pelican_mathjax_extension.getConfig('math_tag_class')
self.pelican_mathjax_extension = pelican_mathjax_extension
self.tag = tag
def handleMatch(self, m):
node = markdown.util.etree.Element(self.tag)
node.set('class', self.math_tag_class)
prefix = '\\(' if m.group('prefix') == '$' else m.group('prefix')
suffix = '\\)' if m.group('suffix') == '$' else m.group('suffix')
node.text = markdown.util.AtomicString(prefix + m.group('math') + suffix)
# If mathjax was successfully matched, then JavaScript needs to be added
# for rendering. The boolean below indicates this
self.pelican_mathjax_extension.mathjax_needed = True
return node
class PelicanMathJaxCorrectDisplayMath(markdown.treeprocessors.Treeprocessor):
"""Corrects invalid html that results from a <div> being put inside
a <p> for displayed math"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def correct_html(self, root, children, div_math, insert_idx, text):
"""Separates out <div class="math"> from the parent tag <p>. Anything
in between is put into its own parent tag of <p>"""
current_idx = 0
for idx in div_math:
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:idx])
# Test to ensure that empty <p> is not inserted
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
insert_idx += 1
text = children[idx].tail
children[idx].tail = None
root.insert(insert_idx, children[idx])
insert_idx += 1
current_idx = idx+1
el = markdown.util.etree.Element('p')
el.text = text
el.extend(children[current_idx:])
if len(el) != 0 or (el.text and not el.text.isspace()):
root.insert(insert_idx, el)
def run(self, root):
"""Searches for <div class="math"> that are children in <p> tags and corrects
the invalid HTML that results"""
math_tag_class = self.pelican_mathjax_extension.getConfig('math_tag_class')
for parent in root:
div_math = []
children = list(parent)
for div in parent.findall('div'):
if div.get('class') == math_tag_class:
div_math.append(children.index(div))
# Do not process further if no displayed math has been found
if not div_math:
continue
insert_idx = list(root).index(parent)
self.correct_html(root, children, div_math, insert_idx, parent.text)
root.remove(parent) # Parent must be removed last for correct insertion index
return root
class PelicanMathJaxAddJavaScript(markdown.treeprocessors.Treeprocessor):
"""Tree Processor for adding Mathjax JavaScript to the blog"""
def __init__(self, pelican_mathjax_extension):
self.pelican_mathjax_extension = pelican_mathjax_extension
def run(self, root):
# If no mathjax was present, then exit
if (not self.pelican_mathjax_extension.mathjax_needed):
return root
# Add the mathjax script to the html document
mathjax_script = etree.Element('script')
mathjax_script.set('type','text/javascript')
mathjax_script.text = AtomicString(self.pelican_mathjax_extension.getConfig('mathjax_script'))
root.append(mathjax_script)
# Reset the boolean switch to false so that script is only added
# to other pages if needed
self.pelican_mathjax_extension.mathjax_needed = False
return root
class PelicanMathJaxExtension(markdown.Extension):
"""A markdown extension enabling mathjax processing in Markdown for Pelican"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
self.config['mathjax_script'] = ['', 'Mathjax JavaScript script']
self.config['math_tag_class'] = ['math', 'The class of the tag in which mathematics is wrapped']
self.config['auto_insert'] = [True, 'Determines if mathjax script is automatically inserted into content']
super(PelicanMathJaxExtension,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
config['mathjax_script'] = [config['mathjax_script'], 'Mathjax JavaScript script']
            config['math_tag_class'] = [config['math_tag_class'], 'The class of the tag in which mathematics is wrapped']
config['auto_insert'] = [config['auto_insert'], 'Determines if mathjax script is automatically inserted into content']
super(PelicanMathJaxExtension,self).__init__(config)
# Used as a flag to determine if javascript
# needs to be injected into a document
self.mathjax_needed = False
def extendMarkdown(self, md, md_globals):
# Regex to detect mathjax
mathjax_inline_regex = r'(?P<prefix>\$)(?P<math>.+?)(?P<suffix>(?<!\s)\2)'
mathjax_display_regex = r'(?P<prefix>\$\$|\\begin\{(.+?)\})(?P<math>.+?)(?P<suffix>\2|\\end\{\3\})'
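        # For reference: with these patterns, text like $x^2$ is picked up as
        # inline math, while $$x^2$$ or \begin{...}...\end{...} is picked up as
        # displayed math; handleMatch wraps the matched TeX in AtomicString so
        # later Markdown processing leaves it untouched.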
# Process mathjax before escapes are processed since escape processing will
        # interfere with mathjax. The order in which the displayed and inlined math
# is registered below matters
md.inlinePatterns.add('mathjax_displayed', PelicanMathJaxPattern(self, 'div', mathjax_display_regex), '<escape')
md.inlinePatterns.add('mathjax_inlined', PelicanMathJaxPattern(self, 'span', mathjax_inline_regex), '<escape')
        # Correct the invalid HTML that results from the displayed math (<div> tag within a <p> tag)
md.treeprocessors.add('mathjax_correctdisplayedmath', PelicanMathJaxCorrectDisplayMath(self), '>inline')
# If necessary, add the JavaScript Mathjax library to the document. This must
# be last in the ordered dict (hence it is given the position '_end')
if self.getConfig('auto_insert'):
md.treeprocessors.add('mathjax_addjavascript', PelicanMathJaxAddJavaScript(self), '_end')
```
|
{
"source": "jeorryb/pyasan",
"score": 3
}
|
#### File: pyasan/pyasan/patents.py
```python
from . import helpers
import pprint
class Patents(object):
def __init__(self, url=helpers.get_url('PATENT'), params=None, **kwargs):
self.url = url
self.params = params
def req(self):
self.response = helpers.api_get(self.url, self.params)
return self.response
def __str__(self):
return pprint.pformat(self.req())
def get(query=None, patent_num=None,
center=None, patent_exp=None,
case_num=None, title=None,
app_sn=None, status=None, **kwargs):
params = {'$q': query,
'patent_number': patent_num,
'center': center,
'case_number': case_num,
'title': title,
'application_sn': app_sn,
'status': status}
pat = Patents(params=params)
return pat.req()
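# Usage sketch (argument values are hypothetical):
#   patents.get(query='propulsion', center='LARC')
# builds the query parameters above and returns whatever helpers.api_get
# yields for the NASA patents endpoint via Patents.req().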
```
#### File: pyasan/tests/test_patents.py
```python
from pyasan import patents, Patents
import vcr
from pytest import fixture
@fixture
def patent_keys():
#Responsible only for returning the test data.
return ['patent_number', 'center', 'patent_expiration_date',
'case_number', 'title', 'application_sn',
'status']
@vcr.use_cassette('tests/vcr_cassettes/Patent_req.yml')
def test_Patent_req_successful(patent_keys):
"""Tests and API call to return a patent from the NASA API"""
patent_params = {'$q': 'fuel'}
p = Patents(params=patent_params)
test_query = p.req()
assert isinstance(test_query[1], dict)
assert set(patent_keys).issubset(test_query[1].keys()), 'All keys should be in the response'
assert 'fuel' in test_query[1]['title'].lower(), 'Query should be in the title'
@vcr.use_cassette('tests/vcr_cassettes/Patent_req.yml')
def test_patent_get_successful(patent_keys):
"""Tests get call to query patent database from NASA API"""
test_get = patents.get(query='fuel')
assert isinstance(test_get[1], dict)
assert set(patent_keys).issubset(test_get[1].keys()), 'All keys should be in the response'
assert 'fuel' in test_get[1]['title'].lower(), 'Query should be in the title'
```
|
{
"source": "jeosol/bazelisp",
"score": 2
}
|
#### File: jeosol/bazelisp/provider.bzl
```python
LispInfo = provider(
doc = "Information about a lisp_* target and its transitive dependencies.",
fields = {
"fasls": "Depset of FASLs for transitive dependencies",
"srcs": "Depset of transitive sources",
"hashes": "Depset of md5 hash files for transitive sources",
"warnings": (
"Depset of files of warnings checked at link time (FASL load) " +
"for transitive sources"
),
"features": "Depset of transitive declared Lisp features",
"compile_data": (
"Depset of files from transitive compile_data, made available " +
"at build time as well as runtime."
),
"cc_info": "CcInfo representing transitive C++ dependencies for linking",
},
)
def collect_lisp_info(deps = [], cdeps = [], build_image = None, features = [], compile_data = []):
"""Create a LispInfo collecting the data needed for Lisp compilation.
Args:
deps: Immediate Lisp dependencies of this target.
cdeps: Immediate C++ dependencies of this target.
build_image: Optional build image Target, which may also contain
dependencies. May be unset if this target just propagates LispInfo
from its dependencies.
features: Lisp features added by this target.
compile_data: Data dependency Targets of this target, which are made
        available to this target and its consumers at build time as well
as runtime.
Returns:
LispInfo
"""
lisp_infos = [dep[LispInfo] for dep in deps]
if build_image and LispInfo in build_image:
lisp_infos.append(build_image[LispInfo])
cc_infos = [lisp_info.cc_info for lisp_info in lisp_infos]
for cdep in cdeps:
cc_infos.append(cdep[CcInfo])
transitive_compile_data = [li.compile_data for li in lisp_infos]
for compile_data_target in compile_data:
default_info = compile_data_target[DefaultInfo]
if default_info.files:
transitive_compile_data.append(default_info.files)
if default_info.default_runfiles:
transitive_compile_data.append(default_info.default_runfiles.files)
return LispInfo(
fasls = depset(
transitive = [li.fasls for li in lisp_infos],
order = "postorder",
),
srcs = depset(
transitive = [li.srcs for li in lisp_infos],
order = "postorder",
),
hashes = depset(transitive = [li.hashes for li in lisp_infos]),
warnings = depset(transitive = [li.warnings for li in lisp_infos]),
features = depset(
features,
transitive = [li.features for li in lisp_infos],
),
compile_data = depset(transitive = transitive_compile_data),
cc_info = cc_common.merge_cc_infos(cc_infos = cc_infos),
)
def extend_lisp_info(
base,
fasls = [],
srcs = [],
hashes = [],
warnings = []):
"""Extends a LispInfo with compilation inputs and outputs.
Args:
base: The base LispInfo provider to be extended.
fasls: FASLs generated for this target.
srcs: This target's Lisp sources.
hashes: Hash files for each file in srcs.
warnings: Warnings files for each file in srcs.
"""
return LispInfo(
fasls = depset(fasls, transitive = [base.fasls], order = "postorder"),
srcs = depset(srcs, transitive = [base.srcs], order = "postorder"),
hashes = depset(hashes, transitive = [base.hashes]),
warnings = depset(warnings, transitive = [base.warnings]),
features = base.features,
compile_data = base.compile_data,
cc_info = base.cc_info,
)
# buildozer: disable=print
def print_provider(p):
"""Prints the LispInfo provider.
Args:
p: A LispInfo provider.
"""
if p.fasls:
print("FASLs: %s" % [f.short_path for f in p.fasls.to_list()])
if p.srcs:
print("Srcs: %s" % [s.short_path for s in p.srcs.to_list()])
if p.hashes:
print("Hashes: %s" % [h.short_path for h in p.hashes.to_list()])
if p.warnings:
print("Warnings: %s" % [w.short_path for w in p.warnings.to_list()])
if p.features:
print("Features: %s" % p.features.to_list())
if p.compile_data:
print("Compile Data: %s" % p.compile_data.to_list())
```
#### File: jeosol/bazelisp/rules.bzl
```python
load(
":provider.bzl",
"LispInfo",
"collect_lisp_info",
"extend_lisp_info",
)
load("@rules_cc//cc:find_cc_toolchain.bzl", "find_cc_toolchain")
load("@bazel_skylib//rules:common_settings.bzl", "BuildSettingInfo")
_BAZEL_LISP_IMAGE = "//:image"
_BAZEL_LISP_IMAGE_MAIN = "bazel.main:main"
_BAZEL_LISP_IMAGE_ENV = {"LISP_MAIN": _BAZEL_LISP_IMAGE_MAIN}
_ELFINATE = "//:elfinate"
_DEFAULT_MALLOC = "@bazel_tools//tools/cpp:malloc"
_DEFAULT_LIBSBCL = "@local_sbcl//:c-support"
_COMPILATION_ORDERS = ["multipass", "serial", "parallel"]
_LISP_LIBRARY_ATTRS = {
"srcs": attr.label_list(
allow_files = [".lisp", ".lsp"],
doc = ("Common Lisp (`.lisp` or `.lsp`) source files. If there are " +
"multiple files in `srcs`, which other files in `srcs` are " +
"loaded before each file is compiled depends on the `order` " +
"attr."),
),
"deps": attr.label_list(
providers = [LispInfo],
doc = ("Common Lisp dependencies (generally [`lisp_library`]" +
"(#lisp-library), but you can put [`lisp_binary`]" +
"(#lisp-binary) in deps for testing)."),
),
"cdeps": attr.label_list(
providers = [CcInfo],
doc = ("C++ dependencies (generally [`cc_library`]" +
"(https://docs.bazel.build/versions/master/be/c-cpp.html" +
"#cc_library))."),
),
"experimental_block_compile": attr.bool(
default = False,
doc = ("Whether to block-compile the sources. If True, the " +
"sources will be block-compiled with " +
"SB-C:COMPILE-MULTIPLE-FILES as a single block, with " +
":BLOCK-COMPILE T.\n\n" +
"It should be noted that block compilation in SBCL " +
"currently has compiler crashes that prevent this feature " +
"from being enabled on all source code, and so this feature " +
"should be used with caution."),
),
"entry_points": attr.string_list(
default = [],
doc = ("If block-compiling, this is the list of entry points passed " +
"to SB-C:COMPILE-FILES (or if using the " +
"experimental_block_compile_specified feature, to COMPILE-FILE)"),
),
"experimental_block_compile_specified": attr.bool(
default = False,
doc = ("Whether to pass :SPECIFIED to :BLOCK-COMPILE in " +
":COMPILE-FILE. This causes SBCL to respect (START-BLOCK) " +
"and (END-BLOCK) declarations on a sub-file basis."),
),
"order": attr.string(
default = "serial",
values = _COMPILATION_ORDERS,
doc = (
"Compilation order, one of:\n" +
"\n" +
'`"serial"` (default) - Each source is compiled in an image ' +
"with previous sources loaded. (Note that in this " +
"configuration you should put a comment at the top of the " +
"list of srcs if there is more than one, so that formatters " +
"like Buildozer do not change the order.)\n" +
"\n" +
'`"multipass"` - Each source is compiled in an image with all ' +
"sources loaded.\n" +
"\n" +
'`"parallel"` - Each source is compiled independently.'
),
),
"data": attr.label_list(
allow_files = True,
doc = ("Data available to this target and its consumers in the " +
"runfiles directory at runtime."),
),
"compile_data": attr.label_list(
allow_files = True,
doc = ("Data available to this target and its consumers at build " +
"time, added to the inputs of LispCompile and LispCore " +
"actions."),
),
"add_features": attr.string_list(
doc = ("Names of symbols (by default in the keyword package) to be " +
"added to `\\*features\\*` of this library and its consumers, at " +
"compile time and in the resulting binary. Note that this " +
"differs from the [`features`](https://docs.bazel.build/" +
"versions/master/be/common-definitions.html#common.features) " +
"attribute common to all build rules which controls " +
"[toolchain](https://docs.bazel.build/versions/master/" +
"toolchains.html) features."),
),
"nowarn": attr.string_list(
doc = "Suppressed Lisp warning types or warning handlers.",
),
"image": attr.label(
allow_single_file = True,
executable = True,
cfg = "target",
default = Label(_BAZEL_LISP_IMAGE),
doc = (
"Lisp binary used as Bazel compilation image. This should be a " +
"binary with the main function `#'bazel:main` defined in " +
"`main.lisp`."
),
),
"verbose": attr.int(
default = 0,
doc = ("Enable verbose debugging output when analyzing and " +
"compiling this target (`0` = none (default), `3` = max)."),
),
"instrument_coverage": attr.int(
values = [-1, 0, 1],
default = -1,
doc = (
"Force coverage instrumentation. Possible values:\n" +
"\n" +
"`0`: Never instrument this target. Should be used if the" +
"target compiles generated source files or does not compile" +
"with coverage instrumentation.\n" +
"\n" +
"`1`: Always instrument this target. Generally should not be " +
"used outside of tests for the coverage implementation.\n" +
"\n" +
"`-1` (default): If coverage data collection is enabled, " +
"instrument this target per [`--instrumentation_filter]" +
"(https://docs.bazel.build/versions/master/" +
"command-line-reference.html#flag--instrumentation_filter).`"
),
),
"_additional_dynamic_load_outputs": attr.label(
default = Label(
"//:additional_dynamic_load_outputs",
),
providers = [BuildSettingInfo],
),
# Do not add references, temporary attribute for find_cc_toolchain.
"_cc_toolchain": attr.label(
default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
),
"_grep_includes": attr.label(
allow_single_file = True,
executable = True,
cfg = "host",
default = Label("@bazel_tools//tools/cpp:grep-includes"),
),
}
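# A minimal hypothetical BUILD sketch for a rule using these attrs (target and
# file names are assumptions, not from this repository):
#   lisp_library(
#       name = "foo",
#       # order = "serial" (the default): earlier srcs are loaded before later
#       # ones are compiled, so packages.lisp must come first.
#       srcs = ["packages.lisp", "foo.lisp"],
#       deps = ["//some/lisp:dep"],
#       add_features = ["foo-feature"],
#   )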
_LISP_BINARY_ATTRS = dict(_LISP_LIBRARY_ATTRS)
_LISP_BINARY_ATTRS.update({
"main": attr.string(
default = "main",
doc = ("Name of function (by default in the `cl-user` package) or " +
"snippet of Lisp code to run when starting the binary. " +
'`"nil"` or `"t"` to start the default REPL. Can be ' +
"overridden by naming a function (or `nil` or `t`) in the " +
"`LISP_MAIN` environment variable."),
),
"malloc": attr.label(
default = _DEFAULT_MALLOC,
providers = [CcInfo],
doc = ("Target providing a custom malloc implementation. Same as " +
"[`cc_binary.malloc`](https://docs.bazel.build/versions/" +
"master/be/c-cpp.html#cc_binary.malloc). Note that these " +
"rules do not respect [`--custom_malloc`]" +
"(https://docs.bazel.build/versions/master/" +
"command-line-reference.html#flag--custom_malloc)."),
),
"stamp": attr.int(
values = [-1, 0, 1],
default = -1,
doc = ("Same as [`cc_binary.stamp`](https://docs.bazel.build/" +
"versions/master/be/c-cpp.html#cc_binary.stamp)."),
),
"allow_save_lisp": attr.bool(
default = False,
doc = ("Whether to preserve the ability to run `save-lisp-and-die` " +
"instead of altering the binary format to be more compatible " +
"with C++ debugging tools (which, for example, allows you to " +
"get combined stacktraces of C/C++ and Lisp code). Must be " +
"`True` for targets used as a compilation image."),
),
"precompile_generics": attr.bool(
default = True,
doc = "If `False`, skip precompiling generic functions.",
),
"save_runtime_options": attr.bool(
default = True,
doc = ("If `False`, process SBCL runtime options at the " +
"command-line on binary startup."),
),
"runtime": attr.label(
default = Label(_DEFAULT_LIBSBCL),
providers = [CcInfo],
doc = ("SBCL C++ dependencies. Consumers should generally omit this " +
"attr and use the default value."),
),
"_elfinate": attr.label(
default = Label(_ELFINATE),
executable = True,
allow_single_file = True,
cfg = "target",
),
"_custom_malloc": attr.label(
default = configuration_field(
fragment = "cpp",
name = "custom_malloc",
),
providers = [CcInfo],
),
})
_LISP_TEST_ATTRS = dict(_LISP_BINARY_ATTRS)
_LISP_TEST_ATTRS.update({
"stamp": attr.int(
values = [-1, 0, 1],
default = 0,
doc = ("Same as [`cc_test.stamp`](https://docs.bazel.build/" +
"versions/master/be/c-cpp.html#cc_test.stamp). Build version " +
"stamping is disabled by default."),
),
})
def _concat_fasls(ctx, inputs, output):
"""Concatenates several FASLs into a combined FASL.
Args:
ctx: Rule context
inputs: List of files to concatenate.
output: File output for the concatenated contents.
"""
if not inputs:
return None
elif len(inputs) == 1:
return inputs[0]
else:
cat_command = "cat ${@:2} > $1"
cat_args = ctx.actions.args()
cat_args.add(output)
cat_args.add_all(inputs)
ctx.actions.run_shell(
inputs = inputs,
outputs = [output],
progress_message = "Combining %{output}",
mnemonic = "LispConcatFASLs",
command = cat_command,
arguments = [cat_args],
)
return output
def _build_flags(ctx, add_features, verbose_level, instrument_coverage):
"""Returns Args for flags for all Lisp build actions.
Args:
ctx: The rule context.
add_features: Depset of transitive Lisp feature strings provided by this
target and its dependencies.
verbose_level: int indicating level of debugging output. If positive, a
        --verbose flag is added.
instrument_coverage: Controls coverage instrumentation, with the following
values:
-1 (default) - Instruments if coverage is enabled for this target.
0 - Instruments never.
1 - Instruments always (for testing purposes).
Returns:
Args object to be passed to Lisp build actions.
"""
cc_toolchain = find_cc_toolchain(ctx)
# Needs to match logic for the :msan config_setting target. Unfortunately,
# config_setting rules don't yet have a Starlark API. Note that this is not
# equivalent to looking at ctx.var.get("msan_config"), we want to know if
# msan is used in this specific configuration, not if it's an msan build in
# general. (It might be better to look at whether msan is enabled in
# features with cc_common.configure_features (_cc_configure_features) and
# cc_common.is_enabled, but the important thing is that the behavior is
# consistent between this target and the targets in its image and runtime
# attrs.)
if cc_toolchain.compiler in ["msan", "msan-track-origins"]:
add_features = depset(["msan"], transitive = [add_features])
flags = ctx.actions.args()
flags.add(
"--compilation-mode",
ctx.var.get("LISP_COMPILATION_MODE", ctx.var["COMPILATION_MODE"]),
)
flags.add("--bindir", ctx.bin_dir.path)
flags.add_joined("--features", add_features, join_with = " ")
if (instrument_coverage > 0 or
(instrument_coverage < 0 and ctx.coverage_instrumented())):
flags.add("--coverage")
if verbose_level > 0:
flags.add("--verbose", str(verbose_level))
if int(ctx.var.get("LISP_BUILD_FORCE", "0")) > 0:
flags.add("--force")
return flags
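# Illustrative example (not taken from the original source): for a fastbuild
# configuration with features ["foo"] and coverage disabled, the Args built
# above expand to roughly
#   --compilation-mode fastbuild --bindir <bin-dir> --features "foo"
# with --verbose N and --force appended only when the corresponding build
# variables are set.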
def _list_excluding_depset(items, exclude):
exclude_set = {item: True for item in exclude.to_list()}
return [item for item in items if item not in exclude_set]
def lisp_compile_srcs(
ctx,
srcs = [],
deps = [],
cdeps = [],
block_compile = False,
block_compile_specified = False,
entry_points = [],
image = None,
add_features = [],
nowarn = [],
order = "serial",
compile_data = [],
verbose_level = 0,
instrument_coverage = -1,
indexer_metadata = []):
"""Generate LispCompile actions, return LispInfo and FASL output.
This is the core functionality shared by the Lisp build rules.
Args:
ctx: The rule context.
srcs: list of src Files.
deps: list of immediate Lisp dependency Targets.
cdeps: list of immediate C++ dependency Targets.
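      block_compile: If True, all srcs are block-compiled together in a single
          compile action instead of one LispCompile action per file.
      block_compile_specified: If True, the --block-compile-specified flag is
          passed to the per-file compile actions. Cannot be combined with
          block_compile.
      entry_points: Entry points passed via --entry-points to the block-compile
          action. Only valid when block_compile is True.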
image: Build image Target used to compile the sources.
add_features: list of Lisp feature strings added by this target.
nowarn: List of suppressed warning type strings.
order: Order in which to load sources, either "serial", "parallel", or
"multipass".
compile_data: list of data dependency Targets whose outputs and runfiles
are made available at load/compile time for this target and its
consumers.
verbose_level: int indicating level of debugging output.
instrument_coverage: Controls coverage instrumentation, with the following values:
-1 (default) - Instruments if coverage is enabled for this target.
0 - Instruments never.
1 - Instruments always (for testing purposes).
indexer_metadata: Extra metadata files to be passed to the --deps
flag of LispCompile when the Kythe indexer is run. Ignored by the
build image itself, but this appears in the command-line for the
LispCompile action which can be inspected by action_listener.
Returns:
struct with fields:
- lisp_info: LispInfo for the target
- output_fasl: Combined FASL for this target (which is also included in
lisp_info.fasls if there are srcs)
- build_flags: Args to pass to all LispCompile and LispCore actions
"""
    if order not in _COMPILATION_ORDERS:
fail("order {} must be one of {}".format(order, _COMPILATION_ORDERS))
if block_compile and block_compile_specified:
fail("cannot block-compile while block-compiling with :specified")
if entry_points and not block_compile:
fail("cannot use entry points without block-compiling")
name = ctx.label.name
verbosep = verbose_level > 0
indexer_build = (ctx.var.get("GROK_ELLIPSIS_BUILD", "0") == "1")
lisp_info = collect_lisp_info(
deps = deps,
cdeps = cdeps,
build_image = image,
features = add_features,
compile_data = compile_data,
)
build_flags = _build_flags(
ctx = ctx,
add_features = lisp_info.features,
verbose_level = verbose_level,
instrument_coverage = instrument_coverage,
)
if not srcs:
return struct(
lisp_info = lisp_info,
output_fasl = None,
build_flags = build_flags,
)
multipass = (order == "multipass") or block_compile
serial = (order == "serial") and not multipass
build_image = image[DefaultInfo].files_to_run
compile_image = build_image
# Lisp source files for all the transitive dependencies not already in the
# image, loaded before compilation, passed to --deps.
deps_srcs = lisp_info.srcs.to_list()
if LispInfo in image:
deps_srcs = _list_excluding_depset(deps_srcs, image[LispInfo].srcs)
if indexer_build:
deps_srcs.extend(indexer_metadata)
# Sources for this target loaded before compilation (after deps), passed to
# --load. What this contains depends on the compilation order:
# multipass: Contains everything
# parallel: Contains nothing
# serial: Contains previous entries in srcs (accumulated below)
load_srcs = srcs if multipass else []
# Arbitrary heuristic to reduce load on the build system by bundling
# FASL and source files load into one compile-image binary.
compile_flags = ctx.actions.args()
if multipass:
nowarn = nowarn + ["redefined-method", "redefined-function"]
# buildozer: disable=print
if verbosep:
print("Target: " + name)
print("Build Img: " + build_image.executable.short_path)
print("Compile Img: " + compile_image.executable.short_path)
fasls = []
warnings = []
hashes = []
output_fasl = ctx.actions.declare_file(name + ".fasl")
if not block_compile:
for src in srcs:
stem = "{}~/{}".format(name, src.short_path[:-len(src.extension) - 1])
if len(srcs) == 1:
fasl = output_fasl
else:
fasl = ctx.actions.declare_file(stem + ".fasl")
fasls.append(fasl)
hashes.append(ctx.actions.declare_file(stem + ".hash"))
warnings.append(ctx.actions.declare_file(stem + ".warnings"))
outs = [fasls[-1], hashes[-1], warnings[-1]]
file_flags = ctx.actions.args()
file_flags.add_joined("--outs", outs, join_with = " ")
file_flags.add("--srcs", src)
file_flags.add_joined("--deps", deps_srcs, join_with = " ")
file_flags.add_joined("--load", load_srcs, join_with = " ")
file_flags.add_joined("--nowarn", nowarn, join_with = " ")
if block_compile_specified:
file_flags.add("--block-compile-specified")
direct_inputs = [src]
direct_inputs.extend(deps_srcs)
direct_inputs.extend(load_srcs)
ctx.actions.run(
outputs = outs,
inputs = depset(
direct_inputs,
transitive = [lisp_info.compile_data],
order = "preorder",
),
progress_message = "Compiling %{input}",
mnemonic = "LispCompile",
env = _BAZEL_LISP_IMAGE_ENV,
arguments = ["compile", build_flags, compile_flags, file_flags],
executable = compile_image,
)
if serial:
load_srcs.append(src)
else:
stem = "{}~/".format(name)
fasls.append(output_fasl)
hashes.extend([ctx.actions.declare_file(stem + src.short_path[:-len(src.extension) - 1] + ".hash") for src in srcs])
warnings.append(ctx.actions.declare_file(stem + name + ".warnings"))
outs = [fasls[0], warnings[0]] + hashes
file_flags = ctx.actions.args()
file_flags.add_joined("--outs", outs, join_with = " ")
file_flags.add_joined("--srcs", srcs, join_with = " ")
file_flags.add_joined("--deps", deps_srcs, join_with = " ")
file_flags.add_joined("--load", load_srcs, join_with = " ")
file_flags.add_joined("--nowarn", nowarn, join_with = " ")
if entry_points:
file_flags.add_joined("--entry-points", entry_points, join_with = " ")
direct_inputs = [s for s in srcs]
direct_inputs.extend(deps_srcs)
direct_inputs.extend(load_srcs)
ctx.actions.run(
outputs = outs,
inputs = depset(
direct_inputs,
transitive = [lisp_info.compile_data],
),
progress_message = "Compiling " + str([x.short_path for x in srcs]),
mnemonic = "LispCompile",
env = _BAZEL_LISP_IMAGE_ENV,
arguments = ["block-compile", build_flags, compile_flags, file_flags],
executable = compile_image,
)
if indexer_build:
srcs = indexer_metadata + srcs
lisp_info = extend_lisp_info(
lisp_info,
srcs = srcs,
fasls = [output_fasl] if srcs else [],
hashes = hashes,
warnings = warnings,
)
return struct(
lisp_info = lisp_info,
output_fasl = _concat_fasls(ctx, fasls, output_fasl),
build_flags = build_flags,
)
def _cc_configure_features(ctx, cc_toolchain):
return cc_common.configure_features(
ctx = ctx,
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
# DEPS file is used to list all the Lisp sources for a target.
# It is a quick hack to make (bazel:load ...) work.
def _lisp_deps_manifest(ctx, lisp_info):
"""Creates a file that lists all Lisp files needed by the target in order."""
out = ctx.actions.declare_file(ctx.label.name + ".deps")
content = ctx.actions.args()
content.set_param_file_format("multiline")
content.add_joined(
lisp_info.features,
join_with = "\n",
format_each = "feature: %s",
)
content.add_joined(
lisp_info.srcs,
join_with = "\n",
format_each = "src: %s",
)
ctx.actions.write(
output = out,
content = content,
)
return out
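# Illustrative contents of the generated .deps manifest (the feature name and
# paths are made-up examples; the "feature:"/"src:" prefixes come from the
# format_each arguments above):
#   feature: foo
#   src: path/to/dep.lisp
#   src: path/to/target.lisp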
def _lisp_dynamic_library(ctx, lisp_info):
cc_toolchain = find_cc_toolchain(ctx)
feature_configuration = _cc_configure_features(ctx, cc_toolchain)
linking_outputs = cc_common.link(
name = ctx.label.name,
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
linking_contexts = [lisp_info.cc_info.linking_context],
output_type = "dynamic_library",
grep_includes = ctx.executable._grep_includes,
)
return linking_outputs.library_to_link.dynamic_library
def _lisp_output_group_info(ctx, lisp_info, fasl_list):
outputs = {"fasl": fasl_list}
# Additional outputs for dynamic loading. These should only be used when
# explicitly requested, so condition the generation of the extra actions
# on a flag. (It might be better to just condition this on --output_groups,
# but that's not readable from Starlark.)
generate_dynamic_load_outputs = (
ctx.attr._additional_dynamic_load_outputs[BuildSettingInfo].value
)
if generate_dynamic_load_outputs:
outputs["deps_manifest"] = [_lisp_deps_manifest(ctx, lisp_info)]
outputs["dynamic_library"] = [_lisp_dynamic_library(ctx, lisp_info)]
return OutputGroupInfo(**outputs)
def _lisp_instrumented_files_info(ctx):
return coverage_common.instrumented_files_info(
ctx,
source_attributes = ["srcs"],
dependency_attributes = ["deps", "cdeps", "image", "data"],
)
def _lisp_runfiles(ctx):
runfiles = ctx.runfiles(files = ctx.files.data)
transitive_runfiles = []
for runfiles_attr in (
ctx.attr.srcs,
ctx.attr.deps,
ctx.attr.cdeps,
ctx.attr.data,
):
for target in runfiles_attr:
transitive_runfiles.append(target[DefaultInfo].default_runfiles)
transitive_runfiles.append(ctx.attr.image[DefaultInfo].default_runfiles)
    # Note: runfiles.merge_all(transitive_runfiles) is unavailable in older
    # Bazel versions (it fails with "runfiles has no field or method
    # 'merge_all'"), so merge the runfiles objects one at a time instead.
    for transitive in transitive_runfiles:
        runfiles = runfiles.merge(transitive)
return runfiles
def _lisp_providers(ctx, lisp_info, fasl, executable = None):
executable_list = [executable] if executable != None else []
fasl_list = [fasl] if fasl != None else []
return [
DefaultInfo(
runfiles = _lisp_runfiles(ctx),
files = depset(executable_list or fasl_list),
executable = executable,
),
lisp_info,
_lisp_output_group_info(ctx, lisp_info, fasl_list),
_lisp_instrumented_files_info(ctx),
]
################################################################################
# Lisp Binary and Lisp Test
################################################################################
def _lisp_binary_impl(ctx):
"""Implementation for lisp_binary and lisp_test rules."""
name = ctx.label.name
core = ctx.actions.declare_file(name + ".core")
verbose_level = max(
ctx.attr.verbose,
int(ctx.var.get("VERBOSE_LISP_BUILD", "0")),
)
verbosep = verbose_level > 0
# buildozer: disable=print
if verbosep:
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Core: %s" % core)
compile = lisp_compile_srcs(
ctx = ctx,
srcs = ctx.files.srcs,
deps = ctx.attr.deps,
cdeps = ctx.attr.cdeps,
image = ctx.attr.image,
add_features = ctx.attr.add_features,
nowarn = ctx.attr.nowarn,
order = ctx.attr.order,
compile_data = ctx.attr.compile_data,
verbose_level = verbose_level,
instrument_coverage = ctx.attr.instrument_coverage,
)
# TODO(czak): Add --hashes, and --warnings flags to bazl.main.
lisp_info = compile.lisp_info
fasls = lisp_info.fasls.to_list()
hashes = lisp_info.hashes.to_list()
warnings = lisp_info.warnings.to_list()
if LispInfo in ctx.attr.image:
# The image already includes some deps.
included = ctx.attr.image[LispInfo]
fasls = _list_excluding_depset(fasls, included.fasls)
hashes = _list_excluding_depset(hashes, included.hashes)
warnings = _list_excluding_depset(warnings, included.warnings)
build_image = ctx.file.image
# buildozer: disable=print
if verbosep:
print("Build image: %s" % build_image.short_path)
specs = ctx.actions.declare_file(name + ".specs")
content = ctx.actions.args()
content.set_param_file_format("multiline")
content.add_joined(fasls, format_joined = '(:deps\n "%s")', join_with = '"\n "')
content.add_joined(warnings, format_joined = '(:warnings\n "%s")', join_with = '"\n "')
content.add_joined(hashes, format_joined = '(:hashes\n "%s")', join_with = '"\n "')
ctx.actions.write(
output = specs,
content = content,
)
inputs = [specs]
inputs.extend(fasls)
inputs.extend(hashes)
inputs.extend(warnings)
inputs = depset(inputs, transitive = [lisp_info.compile_data])
core_flags = ctx.actions.args()
core_flags.add("--specs", specs)
core_flags.add("--outs", core)
core_flags.add("--main", ctx.attr.main)
core_flags.add_joined("--nowarn", ctx.attr.nowarn, join_with = " ")
if ctx.attr.precompile_generics:
core_flags.add("--precompile-generics")
if ctx.attr.save_runtime_options:
core_flags.add("--save-runtime-options")
ctx.actions.run(
outputs = [core],
inputs = inputs,
progress_message = "Building Lisp core %{output}",
mnemonic = "LispCore",
env = _BAZEL_LISP_IMAGE_ENV,
arguments = ["core", compile.build_flags, core_flags],
executable = build_image,
)
cc_toolchain = find_cc_toolchain(ctx)
feature_configuration = _cc_configure_features(ctx, cc_toolchain)
    # The support in ELFinator exists for -pie code, and a non-elfinated SBCL
    # binary is always position-independent; however, position-independent ELF
    # binaries are extremely inefficient. Lisp pointers are all absolute, and a
    # typical Lisp heap might contain 3 to 5 million pointers to functions, and
    # therefore requires that many relocations on each invocation to adjust to
    # wherever the system moved the text segment. C, on the other hand, uses
    # function pointers sparingly.
# I don't have "typical" numbers of pointers, and it can't be inferred from a binary,
# but it's nothing like having 40,000 closures over #<FUNCTION ALWAYS-BOUND {xxxxxx}>
# (which is the SLOT-BOUNDP method "fast method function" for every defstruct slot)
# and another 40,000 over CALL-NEXT-METHOD and so on and so on.
linkopts = ["-Wl,-no-pie"]
# Transform the .core file into a -core.o file, so that can be linked in
# with the C++ dependencies.
core_object_file = ctx.actions.declare_file(name + "-core.o")
link_additional_inputs = []
compilation_outputs = [
cc_common.create_compilation_outputs(
# This file contains the SBCL core, essentially as '.data' in the
# object file, so it can be linked as PIC or not. For the other
# dependencies, we still want the link action to choose normally
# between PIC and non-PIC outputs.
objects = depset([core_object_file]),
pic_objects = depset([core_object_file]),
),
]
elfinate_args = ctx.actions.args()
if ctx.attr.allow_save_lisp:
# If we want to allow the binary to be used as a compilation image, the
# Lisp image has to stay in a form save-lisp-and-die understands. In
# this case, copy the entire native SBCL core into a binary blob in a
# normal '.o' file.
linker_script_file = ctx.actions.declare_file(name + "-syms.lds")
link_additional_inputs.append(linker_script_file)
elfinate_outs = [core_object_file, linker_script_file]
elfinate_cmd = (
"$1 copy $2 $3 && nm -p $3 | " +
"awk '" +
'{print $2";"}BEGIN{print "{"}END{print "};"}' +
"' > $4"
)
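        # The nm/awk pipeline above emits a dynamic-list file of the form
        # (symbol names here are illustrative):
        #   {
        #   SYMBOL_ONE;
        #   SYMBOL_TWO;
        #   };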
elfinate_args.add(ctx.executable._elfinate)
elfinate_args.add(core)
elfinate_args.add(core_object_file)
elfinate_args.add(linker_script_file)
linkopts.append(
"-Wl,--dynamic-list={}".format(linker_script_file.path),
)
else:
# Otherwise, produce a '.s' file holding only compiled Lisp code and a
# '-core.o' containing the balance of the original Lisp spaces.
assembly_file = ctx.actions.declare_file(name + ".s")
elfinate_outs = [assembly_file, core_object_file]
elfinate_cmd = "$1 split $2 $3"
elfinate_args.add(ctx.executable._elfinate)
elfinate_args.add(core)
elfinate_args.add(assembly_file)
# The .s file will get re-assembled before it's linked into the binary.
# Note that this cc_common.compile action is declared before the
# action below which runs elfinate to create this input. The elfinate
# action still ends up first when the graph of actions is computed.
compilation_context, asm_compilation_output = cc_common.compile(
name = name,
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
srcs = [assembly_file],
)
compilation_outputs.append(asm_compilation_output)
ctx.actions.run_shell(
outputs = elfinate_outs,
tools = [ctx.executable._elfinate],
inputs = [core],
command = elfinate_cmd,
arguments = [elfinate_args],
progress_message = "Elfinating Lisp core %{output}",
mnemonic = "LispElfinate",
)
# The rule's malloc attribute can be overridden by the --custom_malloc flag.
malloc = ctx.attr._custom_malloc or ctx.attr.malloc
linking_outputs = cc_common.link(
name = name,
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
user_link_flags = linkopts,
        # compilation_outputs contains all the Lisp code. If allow_save_lisp,
# it's all in the -core.o file. Otherwise, the compiled Lisp code was
# disassembled and reassembled, and this contains the output from that
# plus the remainder of the Lisp core in the -core.o file.
compilation_outputs = cc_common.merge_compilation_outputs(
compilation_outputs = compilation_outputs,
),
# linking_contexts contains all the C++ code to be linked in.
linking_contexts = [
# C++ code from transitive dependencies.
lisp_info.cc_info.linking_context,
# SBCL's C++ dependencies.
ctx.attr.runtime[CcInfo].linking_context,
# A custom malloc library gets linked in like any other library.
# It's important that each binary gets a single malloc
# implementation, so this does not get propagated to any of the
# binary's consumers.
malloc[CcInfo].linking_context,
],
stamp = ctx.attr.stamp,
output_type = "executable",
additional_inputs = link_additional_inputs,
grep_includes = ctx.executable._grep_includes,
)
return _lisp_providers(
ctx = ctx,
lisp_info = lisp_info,
fasl = compile.output_fasl,
executable = linking_outputs.executable,
)
lisp_binary = rule(
implementation = _lisp_binary_impl,
executable = True,
attrs = _LISP_BINARY_ATTRS,
fragments = ["cpp"],
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
incompatible_use_toolchain_transition = True,
doc = """
Supports all of the same attributes as [`lisp_library`](#lisp_library), plus
additional attributes governing the behavior of the completed binary. The
[`main`](#lisp_binary-main) attribute defines behavior (generally specifying a
function to run with no arguments) when the binary is started. By default, it
runs `(cl-user::main)`.
Example:
lisp_binary(
        name = "binary",
srcs = ["binary.lisp"],
main = "binary:main",
deps = [":library"],
)""",
)
lisp_test = rule(
implementation = _lisp_binary_impl,
executable = True,
test = True,
attrs = _LISP_TEST_ATTRS,
fragments = ["cpp"],
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
incompatible_use_toolchain_transition = True,
doc = """
Like [`lisp_binary`](#lisp_binary), for defining tests to be run with the
[`test`](https://docs.bazel.build/versions/master/user-manual.html#test)
command. The [`main`](#lisp_test-main) attribute should name a function which
runs the tests, outputs information about failing assertions, and exits with a
non-zero exit status if there are any failures.
Example:
lisp_test(
        name = "library-test",
srcs = ["library-test.lisp"],
main = "library-test:run-tests",
deps = [
":library",
"//path/to/unit-test:framework",
],
)""",
)
################################################################################
# Lisp Library
################################################################################
def _lisp_library_impl(ctx):
"""Lisp specific implementation for lisp_library rules."""
verbose_level = max(
getattr(ctx.attr, "verbose", 0),
int(ctx.var.get("VERBOSE_LISP_BUILD", "0")),
)
verbosep = verbose_level > 0
# buildozer: disable=print
if verbosep:
print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
print("Library: %s" % ctx.label.name)
compile = lisp_compile_srcs(
ctx = ctx,
srcs = ctx.files.srcs,
deps = ctx.attr.deps,
cdeps = ctx.attr.cdeps,
block_compile = ctx.attr.experimental_block_compile,
block_compile_specified = ctx.attr.experimental_block_compile_specified,
entry_points = ctx.attr.entry_points,
image = ctx.attr.image,
add_features = ctx.attr.add_features,
nowarn = ctx.attr.nowarn,
order = ctx.attr.order,
compile_data = ctx.attr.compile_data,
verbose_level = verbose_level,
instrument_coverage = ctx.attr.instrument_coverage,
)
return _lisp_providers(
ctx = ctx,
lisp_info = compile.lisp_info,
fasl = compile.output_fasl,
)
lisp_library = rule(
implementation = _lisp_library_impl,
attrs = _LISP_LIBRARY_ATTRS,
fragments = ["cpp"],
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
incompatible_use_toolchain_transition = True,
doc = """
The basic compilation unit for Lisp code. Can have Lisp dependencies
([`deps`](#lisp_library-deps)) and C/C++ dependencies
([`cdeps`](#lisp_library-cdeps)).
Example:
    lisp_library(
        name = "library",
srcs = ["library.lisp"],
cdeps = [":cc-dependency-ci"],
deps = [":dependency"],
)""",
)
```
|
{
"source": "Jeoungseungho/identity",
"score": 2
}
|
#### File: api/v1/project_group.py
```python
from spaceone.api.identity.v1 import project_group_pb2, project_group_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class ProjectGroup(BaseAPI, project_group_pb2_grpc.ProjectGroupServicer):
pb2 = project_group_pb2
pb2_grpc = project_group_pb2_grpc
def create(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('ProjectGroupInfo', project_group_svc.create(params))
def update(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('ProjectGroupInfo', project_group_svc.update(params))
def delete(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
project_group_svc.delete(params)
return self.locator.get_info('EmptyInfo')
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('ProjectGroupInfo', project_group_svc.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
project_group_vos, total_count = project_group_svc.list(params)
return self.locator.get_info('ProjectGroupsInfo', project_group_vos, total_count,
minimal=self.get_minimal(params))
def stat(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('StatisticsInfo', project_group_svc.stat(params))
def add_member(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('ProjectGroupRoleBindingInfo', project_group_svc.add_member(params))
def modify_member(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
return self.locator.get_info('ProjectGroupRoleBindingInfo', project_group_svc.modify_member(params))
def remove_member(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
project_group_svc.remove_member(params)
return self.locator.get_info('EmptyInfo')
def list_members(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
project_group_map_vos, total_count = project_group_svc.list_members(params)
return self.locator.get_info('ProjectGroupRoleBindingsInfo', project_group_map_vos, total_count,
minimal=self.get_minimal(params))
def list_projects(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('ProjectGroupService', metadata) as project_group_svc:
project_vos, total_count = project_group_svc.list_projects(params)
return self.locator.get_info('ProjectGroupProjectsInfo', project_vos, total_count,
minimal=self.get_minimal(params))
```
#### File: api/v1/token.py
```python
from spaceone.api.identity.v1 import token_pb2, token_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class Token(BaseAPI, token_pb2_grpc.TokenServicer):
pb2 = token_pb2
pb2_grpc = token_pb2_grpc
def issue(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('TokenService', metadata) as token_svc:
data = token_svc.issue(params)
return self.locator.get_info('TokenInfo', data)
def refresh(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('TokenService', metadata) as token_svc:
data = token_svc.refresh(params)
return self.locator.get_info('TokenInfo', data)
```
#### File: identity/info/authorization_info.py
```python
from spaceone.core.pygrpc.message_type import *
__all__ = ['AuthorizationResponse']
def AuthorizationResponse(auth_data):
return change_handler_authorization_response(auth_data)
```
#### File: identity/info/endpoint_info.py
```python
import functools
from spaceone.api.identity.v1 import endpoint_pb2
__all__ = ['EndpointInfo', 'EndpointsInfo']
def EndpointInfo(endpoint_vo: dict, minimal=False):
info = {
'service': endpoint_vo['service'],
'name': endpoint_vo['name'],
'endpoint': endpoint_vo['endpoint'],
}
if not minimal:
info.update({
'state': endpoint_vo.get('state'),
'version': endpoint_vo.get('version'),
})
return endpoint_pb2.EndpointInfo(**info)
def EndpointsInfo(endpoint_vos, total_count, **kwargs):
results = list(map(functools.partial(EndpointInfo, **kwargs), endpoint_vos))
return endpoint_pb2.EndpointsInfo(results=results, total_count=total_count)
```
#### File: identity/info/service_account_info.py
```python
import functools
from spaceone.api.core.v1 import tag_pb2
from spaceone.api.identity.v1 import service_account_pb2
from spaceone.core.pygrpc.message_type import *
from spaceone.identity.model.service_account_model import ServiceAccount
from spaceone.identity.info.project_info import ProjectInfo
__all__ = ['ServiceAccountInfo', 'ServiceAccountsInfo']
def ServiceAccountInfo(service_account_vo: ServiceAccount, minimal=False):
info = {
'service_account_id': service_account_vo.service_account_id,
'name': service_account_vo.name,
'provider': service_account_vo.provider
}
if not minimal:
info.update({
'data': change_struct_type(service_account_vo.data),
'tags': [tag_pb2.Tag(key=tag.key, value=tag.value) for tag in service_account_vo.tags],
'domain_id': service_account_vo.domain_id,
'created_at': change_timestamp_type(service_account_vo.created_at)
})
if service_account_vo.project:
info.update({
'project_info': ProjectInfo(service_account_vo.project, minimal=True)
})
# Temporary code for DB migration
if not service_account_vo.project_id and service_account_vo.project:
service_account_vo.update({'project_id': service_account_vo.project.project_id})
return service_account_pb2.ServiceAccountInfo(**info)
def ServiceAccountsInfo(service_account_vos, total_count, **kwargs):
results = list(map(functools.partial(ServiceAccountInfo, **kwargs), service_account_vos))
return service_account_pb2.ServiceAccountsInfo(results=results, total_count=total_count)
```
#### File: identity/manager/role_binding_manager.py
```python
import logging
from spaceone.core import cache
from spaceone.core.manager import BaseManager
from spaceone.identity.error.error_role import *
from spaceone.identity.model.role_binding_model import *
from spaceone.identity.manager import RoleManager, ProjectManager, ProjectGroupManager, UserManager
_LOGGER = logging.getLogger(__name__)
_SUPPORTED_RESOURCE_TYPES = ['identity.User']
class RoleBindingManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.role_binding_model: RoleBinding = self.locator.get_model('RoleBinding')
def create_role_binding(self, params):
def _rollback(role_binding_vo):
_LOGGER.info(f'[create_role_binding._rollback] Delete role binding : {role_binding_vo.name} ({role_binding_vo.role_binding_id})')
role_binding_vo.delete()
resource_type = params['resource_type']
resource_id = params['resource_id']
role_id = params['role_id']
project_id = params.get('project_id')
project_group_id = params.get('project_group_id')
domain_id = params['domain_id']
role_mgr: RoleManager = self.locator.get_manager('RoleManager')
project_mgr: ProjectManager = self.locator.get_manager('ProjectManager')
project_group_mgr: ProjectGroupManager = self.locator.get_manager('ProjectGroupManager')
user_mgr: UserManager = self.locator.get_manager('UserManager')
self._check_resource_type(resource_type)
if resource_type == 'identity.User':
params['user'] = user_mgr.get_user(resource_id, domain_id)
role_vo = role_mgr.get_role(role_id, domain_id)
self._check_role_type(role_vo.role_type, resource_type, resource_id, domain_id)
params['role'] = role_vo
if role_vo.role_type == 'PROJECT':
if project_id:
project_vo = project_mgr.get_project(project_id, domain_id)
self._check_duplicate_project_role(resource_type, resource_id, project_vo, project_id)
params['project'] = project_vo
elif project_group_id:
project_group_vo = project_group_mgr.get_project_group(project_group_id, domain_id)
self._check_duplicate_project_group_role(resource_type, resource_id, project_group_vo, project_group_id)
params['project_group'] = project_group_vo
else:
raise ERROR_REQUIRED_PROJECT_OR_PROJECT_GROUP()
else:
self._check_duplicate_domain_or_system_role(resource_type, resource_id, role_vo, role_id)
if project_id:
raise ERROR_NOT_ALLOWED_PROJECT_ID()
elif project_group_id:
raise ERROR_NOT_ALLOWED_PROJECT_GROUP_ID()
role_binding_vo = self.role_binding_model.create(params)
self.transaction.add_rollback(_rollback, role_binding_vo)
cache.delete_pattern(f'role-bindings:{domain_id}:{resource_id}*')
cache.delete_pattern(f'user-permissions:{domain_id}:{resource_id}*')
cache.delete_pattern(f'user-scopes:{domain_id}:{resource_id}*')
return role_binding_vo
def update_role_binding(self, params):
role_binding_vo = self.get_role_binding(params['role_binding_id'], params['domain_id'])
return self.update_role_binding_by_vo(params, role_binding_vo)
def update_role_binding_by_vo(self, params, role_binding_vo):
def _rollback(old_data):
_LOGGER.info(f'[update_role_binding._rollback] Revert Data : {old_data["role_binding_id"]}')
role_binding_vo.update(old_data)
self.transaction.add_rollback(_rollback, role_binding_vo.to_dict())
return role_binding_vo.update(params)
def delete_role_binding(self, role_binding_id, domain_id):
role_binding_vo = self.get_role_binding(role_binding_id, domain_id)
self.delete_role_binding_by_vo(role_binding_vo)
def delete_role_binding_by_vo(self, role_binding_vo):
resource_id = role_binding_vo.resource_id
domain_id = role_binding_vo.domain_id
role_binding_vo.delete()
cache.delete_pattern(f'role-bindings:{domain_id}:{resource_id}*')
cache.delete_pattern(f'user-permissions:{domain_id}:{resource_id}*')
cache.delete_pattern(f'user-scopes:{domain_id}:{resource_id}*')
def get_role_binding(self, role_binding_id, domain_id, only=None):
return self.role_binding_model.get(role_binding_id=role_binding_id, domain_id=domain_id, only=only)
def get_project_role_binding(self, resource_type, resource_id, domain_id, project_vo=None, project_group_vo=None):
return self.role_binding_model.filter(resource_type=resource_type, resource_id=resource_id, domain_id=domain_id,
project=project_vo, project_group=project_group_vo)
def get_user_role_bindings(self, user_id, domain_id):
return self.role_binding_model.filter(resource_type='identity.User', resource_id=user_id, domain_id=domain_id)
def list_role_bindings(self, query):
return self.role_binding_model.query(**query)
def stat_role_bindings(self, query):
return self.role_binding_model.stat(**query)
@staticmethod
def _check_resource_type(resource_type):
if resource_type not in _SUPPORTED_RESOURCE_TYPES:
raise ERROR_INVALID_PARAMETER(
key='resource_type', reason=f'resource_type is not supported. (support = {_SUPPORTED_RESOURCE_TYPES})')
def _check_role_type(self, role_type, resource_type, resource_id, domain_id):
role_binding_vos = self.role_binding_model.filter(resource_type=resource_type,
resource_id=resource_id, domain_id=domain_id)
for role_binding_vo in role_binding_vos:
if role_type == 'SYSTEM':
if role_binding_vo.role.role_type in ['PROJECT', 'DOMAIN']:
raise ERROR_NOT_ALLOWED_ROLE_TYPE()
else:
if role_binding_vo.role.role_type == 'SYSTEM':
raise ERROR_NOT_ALLOWED_ROLE_TYPE()
def _check_duplicate_domain_or_system_role(self, resource_type, resource_id, role_vo, role_id):
rb_vos = self.role_binding_model.filter(resource_type=resource_type, resource_id=resource_id, role=role_vo)
if rb_vos.count() > 0:
raise ERROR_DUPLICATE_ROLE_BOUND(role_id=role_id, resource_id=resource_id)
def _check_duplicate_project_role(self, resource_type, resource_id, project_vo, project_id):
project_rb_vos = self.role_binding_model.filter(resource_type=resource_type, resource_id=resource_id,
project=project_vo)
if project_rb_vos.count() > 0:
raise ERROR_DUPLICATE_RESOURCE_IN_PROJECT(project_id=project_id, resource_id=resource_id)
def _check_duplicate_project_group_role(self, resource_type, resource_id, project_group_vo, project_group_id):
pg_rb_vos = self.role_binding_model.filter(resource_type=resource_type, resource_id=resource_id,
project_group=project_group_vo)
if pg_rb_vos.count() > 0:
raise ERROR_DUPLICATE_RESOURCE_IN_PROJECT_GROUP(project_group_id=project_group_id,
resource_id=resource_id)
```
#### File: identity/service/domain_owner_service.py
```python
import pytz
from spaceone.core.service import *
from spaceone.core.error import *
from spaceone.identity.manager import DomainOwnerManager
@authentication_handler(exclude=['create'])
@authorization_handler(exclude=['create'])
@mutation_handler(exclude=['create'])
@event_handler
class DomainOwnerService(BaseService):
def __init__(self, metadata):
super().__init__(metadata)
self.domain_owner_mgr: DomainOwnerManager = self.locator.get_manager('DomainOwnerManager')
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['owner_id', 'password', 'domain_id'])
def create(self, params):
""" Create domain owner
Args:
params (dict): {
'owner_id': 'str',
'password': '<PASSWORD>',
'name': 'str',
'email': 'str',
'language': 'str',
'timezone': 'str',
'domain_id': 'str'
}
Returns:
domain_owner_vo (object)
"""
if 'timezone' in params:
self._check_timezone(params['timezone'])
return self.domain_owner_mgr.create_owner(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['owner_id', 'domain_id'])
def update(self, params):
""" Update domain owner
Args:
params (dict): {
'owner_id': 'str',
'password': '<PASSWORD>',
'name': 'str',
'email': 'str',
'language': 'str',
'timezone': 'str',
'domain_id': 'str'
}
Returns:
domain_owner_vo (object)
"""
if 'timezone' in params:
self._check_timezone(params['timezone'])
return self.domain_owner_mgr.update_owner(params)
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id', 'owner_id'])
def delete(self, params):
""" Delete domain owner
Args:
params (dict): {
'owner_id': 'str',
'domain_id': 'str'
}
Returns:
None
"""
self.domain_owner_mgr.delete_owner(params['domain_id'], params['owner_id'])
@transaction(append_meta={'authorization.scope': 'DOMAIN'})
@check_required(['domain_id'])
    def get(self, params):
        """ Get domain owner
Args:
params (dict): {
'owner_id': 'str',
'domain_id': 'str',
'only': 'list'
}
Returns:
domain_owner_vo (object)
"""
return self.domain_owner_mgr.get_owner(params['domain_id'], params.get('owner_id'), params.get('only'))
@staticmethod
def _check_timezone(timezone):
if timezone not in pytz.all_timezones:
raise ERROR_INVALID_PARAMETER(key='timezone', reason='Timezone is invalid.')
```
#### File: test/api/test_authentication.py
```python
import json
import os
import unittest
import pprint
from google.protobuf.json_format import MessageToDict
from spaceone.core import utils, pygrpc
from spaceone.core.auth.jwt import JWTUtil
class TestAuthentication(unittest.TestCase):
config = utils.load_yaml_from_file(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
pp = pprint.PrettyPrinter(indent=4)
domain = None
api_key_info = None
api_key = None
identity_v1 = None
owner_id = None
owner_pw = utils.generate_password()
owner_token = None
@classmethod
def setUpClass(cls):
super(TestAuthentication, cls).setUpClass()
endpoints = cls.config.get('ENDPOINTS', {})
cls.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity', {}).get('v1'),
version='v1')
cls._create_domain()
cls._create_domain_owner()
cls._issue_owner_token()
@classmethod
def tearDownClass(cls):
super(TestAuthentication, cls).tearDownClass()
cls.identity_v1.DomainOwner.delete(
{
'domain_id': cls.domain.domain_id,
'owner_id': cls.owner_id
},
metadata=(('token', cls.owner_token),)
)
cls.identity_v1.Domain.delete(
{
'domain_id': cls.domain.domain_id
},
metadata=(('token', cls.owner_token),)
)
if cls.api_key_info:
cls.identity_v1.APIKey.delete(
{
'api_key_id': cls.api_key_info.api_key_id
},
metadata=(('token', cls.owner_token),)
)
@classmethod
def _create_domain(cls):
name = utils.random_string()
params = {
'name': name
}
cls.domain = cls.identity_v1.Domain.create(
params,
metadata=(('token', cls.owner_token),)
)
@classmethod
def _create_api_key(cls):
params = {
'domain_id': cls.domain.domain_id
}
api_key_info = cls.identity_v1.APIKey.create(
params,
metadata=(('token', cls.owner_token),)
)
cls.api_key_info = api_key_info
cls.api_key = api_key_info.api_key
@classmethod
def _create_domain_owner(cls):
cls.owner_id = utils.random_string()
params = {
'owner_id': cls.owner_id,
'password': <PASSWORD>,
'domain_id': cls.domain.domain_id
}
owner = cls.identity_v1.DomainOwner.create(
params
)
cls.domain_owner = owner
@classmethod
def _issue_owner_token(cls):
token_param = {
'user_type': 'DOMAIN_OWNER',
'user_id': cls.owner_id,
'credentials': {
'password': <PASSWORD>
},
'domain_id': cls.domain.domain_id
}
issue_token = cls.identity_v1.Token.issue(token_param)
cls.owner_token = issue_token.access_token
def setUp(self):
self.user = None
self.user_params = None
self.token = None
def tearDown(self):
if self.user:
print(f'[tearDown] Delete User. {self.user.user_id}')
self.identity_v1.User.delete(
{
'user_id': self.user.user_id,
'domain_id': self.domain.domain_id
},
metadata=(('token', self.owner_token),)
)
def _print_data(self, message, description=None):
print()
if description:
print(f'[ {description} ]')
self.pp.pprint(MessageToDict(message, preserving_proto_field_name=True))
def _create_user(self, user_type=None, backend=None):
self.user_params = {
'user_id': utils.random_string() + '@mz.co.kr',
'password': utils.generate_password(),
'name': 'Steven' + utils.random_string(),
'timezone': 'Asia/Seoul',
'user_type': user_type or 'USER',
'backend': backend or 'LOCAL',
'domain_id': self.domain.domain_id
}
self.user = self.identity_v1.User.create(
self.user_params,
metadata=(('token', self.owner_token),)
)
self._print_data(self.user, '_create_user')
def _issue_token(self):
params = {
'user_id': self.user.user_id,
'credentials': {
'password': <PASSWORD>['password']
},
'domain_id': self.domain.domain_id
}
self.token = self.identity_v1.Token.issue(params)
decoded = JWTUtil.unverified_decode(self.token.access_token)
print()
print('[ _issue_token: decoded token ]')
self.pp.pprint(decoded)
def _get_user(self):
params = {
'user_id': self.user.user_id
}
user = self.identity_v1.User.get(
params,
metadata=(
('token', self.token.access_token),
)
)
self._print_data(user, '_get_user')
def test_id_pw_authentication(self):
self._create_user()
self._issue_token()
self._get_user()
def test_get_public_key(self):
params = {
'domain_id': self.domain.domain_id
}
secret = self.identity_v1.Domain.get_public_key(params)
self.assertEqual(self.domain.domain_id, secret.domain_id)
key = json.loads(secret.public_key)
self.assertEqual("RSA", key['kty'])
```
#### File: test/api/test_domain_owner.py
```python
import os
import random
import unittest
from langcodes import Language
from spaceone.core import utils, pygrpc
from spaceone.core.unittest.runner import RichTestRunner
class TestDomainOwner(unittest.TestCase):
config = utils.load_yaml_from_file(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
@classmethod
def setUpClass(cls):
super(TestDomainOwner, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestDomainOwner, cls).tearDownClass()
def setUp(self):
endpoints = self.config.get('ENDPOINTS', {})
self.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity', {}).get('v1'),
version='v1')
self.domain = None
self.domain_owner = None
self._create_domain()
def tearDown(self):
if self.domain_owner:
print(f'[TearDown] Delete domain owner. (domain_id: {self.domain.domain_id})')
params = {
'domain_id': self.domain.domain_id,
'owner_id': self.domain_owner.owner_id
}
self.identity_v1.DomainOwner.delete(
params,
metadata=(('token', self.owner_token),)
)
if self.domain:
print(f'[TearDown] Delete domain. (domain_id: {self.domain.domain_id})')
self.identity_v1.Domain.delete(
{
'domain_id': self.domain.domain_id
},
metadata=(('token', self.owner_token),)
)
def _create_domain(self):
name = utils.random_string()
params = {
'name': name
}
self.domain = self.identity_v1.Domain.create(params)
def _issue_owner_token(self, owner_id, owner_pw):
token_params = {
'user_type': 'DOMAIN_OWNER',
'user_id': owner_id,
'credentials': {
'password': <PASSWORD>
},
'domain_id': self.domain.domain_id
}
issue_token = self.identity_v1.Token.issue(token_params)
self.owner_token = issue_token.access_token
def test_create_owner(self):
lang_code = random.choice(['zh-hans', 'jp', 'ko', 'en', 'es'])
language = Language.get(lang_code)
owner_id = utils.random_string()
params = {
'owner_id': owner_id,
'password': utils.generate_password(),
'name': 'Steven' + utils.random_string(),
'language': language.__str__(),
'timezone': 'Asia/Seoul',
'email': 'Steven' + utils.random_string() + '@mz.co.kr',
'domain_id': self.domain.domain_id
}
owner = self.identity_v1.DomainOwner.create(
params
)
self.domain_owner = owner
self.params = params
self.assertEqual(params['name'], self.domain_owner.name)
self._issue_owner_token(params['owner_id'], params['password'])
def test_failure_create_two_owners(self):
self.test_create_owner()
with self.assertRaises(Exception) as e:
self.test_create_owner()
self.assertIn("ERROR_NOT_UNIQUE", str(e.exception))
def test_update_owner(self):
self.test_create_owner()
self.params['owner_id'] = self.domain_owner.owner_id
self.params['name'] = '<NAME>'
owner = self.identity_v1.DomainOwner.update(
self.params,
metadata=(('token', self.owner_token),)
)
self.assertEqual(owner.name, self.params['name'])
def test_get_domain_owner(self):
self.test_create_owner()
params = {
'owner_id': self.domain_owner.owner_id,
'domain_id': self.domain.domain_id
}
owner = self.identity_v1.DomainOwner.get(
params,
metadata=(('token', self.owner_token),)
)
self.assertEqual(owner.name, self.domain_owner.name)
def test_get_domain_owner_with_domain_id(self):
self.test_create_owner()
params = {
'domain_id': self.domain.domain_id
}
owner = self.identity_v1.DomainOwner.get(
params,
metadata=(('token', self.owner_token),)
)
self.assertEqual(owner.name, self.domain_owner.name)
def test_failure_get_not_exist_owner(self):
self.test_create_owner()
params = {
'domain_id': 'no-domain',
'owner_id': 'no-owner'
}
with self.assertRaises(Exception) as e:
self.identity_v1.DomainOwner.get(
params,
metadata=(('token', self.owner_token),)
)
self.assertIn('ERROR_NOT_FOUND', str(e.exception))
if __name__ == "__main__":
unittest.main(testRunner=RichTestRunner)
```
#### File: test/api/test_endpoint.py
```python
import os
import unittest
import pprint
from google.protobuf.json_format import MessageToDict
from spaceone.core import utils, pygrpc
from spaceone.core.unittest.runner import RichTestRunner
class TestEndpoint(unittest.TestCase):
config = utils.load_yaml_from_file(
os.environ.get('SPACEONE_TEST_CONFIG_FILE', './config.yml'))
pp = pprint.PrettyPrinter(indent=4)
identity_v1 = None
domain = None
domain_owner = None
owner_id = None
owner_pw = None
owner_token = None
@classmethod
def setUpClass(cls):
super(TestEndpoint, cls).setUpClass()
endpoints = cls.config.get('ENDPOINTS', {})
cls.identity_v1 = pygrpc.client(endpoint=endpoints.get('identity', {}).get('v1'), version='v1')
cls._create_domain()
cls._create_domain_owner()
cls._issue_owner_token()
@classmethod
def tearDownClass(cls):
super(TestEndpoint, cls).tearDownClass()
cls.identity_v1.DomainOwner.delete(
{
'domain_id': cls.domain.domain_id,
'owner_id': cls.owner_id
},
metadata=(('token', cls.owner_token),)
)
print(f'>> delete domain owner: {cls.owner_id}')
if cls.domain:
cls.identity_v1.Domain.delete(
{
'domain_id': cls.domain.domain_id
},
metadata=(('token', cls.owner_token),)
)
print(f'>> delete domain: {cls.domain.name} ({cls.domain.domain_id})')
@classmethod
def _create_domain(cls):
name = utils.random_string()
params = {
'name': name
}
cls.domain = cls.identity_v1.Domain.create(params)
print(f'domain_id: {cls.domain.domain_id}')
print(f'domain_name: {cls.domain.name}')
@classmethod
def _create_domain_owner(cls):
cls.owner_id = utils.random_string()
cls.owner_pw = utils.generate_password()
owner = cls.identity_v1.DomainOwner.create({
'owner_id': cls.owner_id,
'password': <PASSWORD>,
'domain_id': cls.domain.domain_id
})
cls.domain_owner = owner
print(f'owner_id: {cls.owner_id}')
print(f'owner_pw: {cls.owner_pw}')
@classmethod
def _issue_owner_token(cls):
token_params = {
'user_type': 'DOMAIN_OWNER',
'user_id': cls.owner_id,
'credentials': {
'password': <PASSWORD>
},
'domain_id': cls.domain.domain_id
}
issue_token = cls.identity_v1.Token.issue(token_params)
cls.owner_token = issue_token.access_token
def setUp(self):
pass
def tearDown(self):
pass
def _print_data(self, message, description=None):
print()
if description:
print(f'[ {description} ]')
self.pp.pprint(MessageToDict(message, preserving_proto_field_name=True))
def test_list_endpoints(self):
params = {}
result = self.identity_v1.Endpoint.list(
params, metadata=(('token', self.owner_token),))
self._print_data(result, 'test_list_endpoints')
if __name__ == '__main__':
unittest.main(testRunner=RichTestRunner)
```
|
{
"source": "jeovazero/zoas-store-api-graphql",
"score": 2
}
|
#### File: flaskr/controllers/product.py
```python
from ..models import ProductModel
from flaskr import db
from flaskr.graphqlr.errors import INVALID_PRODUCT_ID, ZoasError
Session = db.session
class ProductController:
@staticmethod
def get(id: str):
product = Session.query(ProductModel).filter_by(id=id).first()
if not product:
raise ZoasError(INVALID_PRODUCT_ID)
else:
return product
```
#### File: graphqlr/cart/types.py
```python
from graphene import (
ObjectType,
List,
Int,
String,
Float,
InputObjectType,
relay,
Field,
)
from ..mixins import SessionMixin
from flaskr.models import ProductCartModel
from flaskr import db
DbSession = db.session
class PhotoProductCart(ObjectType):
"""A photo of a product in the cart"""
url = String()
class ProductCart(ObjectType, SessionMixin):
"""A product in the cart"""
product_id = Int()
title = String()
description = String()
price = Float()
quantity = Int()
photos = List(PhotoProductCart)
class Meta:
interfaces = (relay.Node,)
@classmethod
def get_node(cls, info, id):
prod_cart = (
DbSession.query(ProductCartModel)
.filter_by(cart_id=cls.sid(), product_id=id)
.first()
)
if prod_cart is not None:
return ProductCart(prod_cart)
return prod_cart
def __init__(self, prodcart):
self.id = prodcart.product_id
self.product_id = prodcart.product_id
self.title = prodcart.product.title
self.description = prodcart.product.description
self.price = prodcart.product.price
self.quantity = prodcart.quantity
self.photos = prodcart.product.photos
class Address(ObjectType):
"""A address of a customer"""
city = String()
country = String()
zipcode = String()
street = String()
number = String()
district = String()
class AddressInput(InputObjectType):
"""A address input of a customer"""
city = String(required=True)
country = String(required=True)
zipcode = String(required=True)
street = String(required=True)
number = String(required=True)
district = String(required=True)
class CreditCardInput(InputObjectType):
"""A credit card input of a customer"""
card_number = String(required=True)
expiration_date = String(required=True)
cvv = String(required=True)
class PurchaseResult(ObjectType):
"""A purchase result of a customer"""
customer = String()
address = Field(Address)
total_paid = Float()
products_paid = List(ProductCart)
```
#### File: tests/helpers/api.py
```python
from .func import encode_base64
def create_cart(client, uid):
return client.post(
"/graphql",
json={
"query": """
mutation {
createCart(input: {"""
f'clientMutationId: "{uid}"'
"""}) {
clientMutationId
confirmation
}
}
"""
},
)
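# For reference, the query string assembled by create_cart above is equivalent
# to the following GraphQL document (uid shown as a placeholder):
#   mutation {
#     createCart(input: {clientMutationId: "<uid>"}) {
#       clientMutationId
#       confirmation
#     }
#   }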
def delete_cart(client, mutation_id):
return client.post(
"/graphql",
json={
"query": """
mutation{
deleteCart(input: {"""
f'clientMutationId: "{mutation_id}"'
"""}){
clientMutationId
confirmation
}
}
"""
},
)
def get_cart(client):
return client.post(
"/graphql",
json={
"query": """
query {
cart {
id
productId
quantity
price
photos {
url
}
}
}
"""
},
)
def put_product_cart(client, pid, qtd, uid):
id = encode_base64(f"Product:{pid}")
return client.post(
"/graphql",
json={
"query": """
mutation {
putProductToCart(input: {"""
f'id: "{id}", quantity: {qtd}, clientMutationId: "{uid}"'
"""}){
clientMutationId
payload{
id
productId
quantity
price
photos {
url
}
}
}
}
"""
},
)
def remove_product_cart(client, pid, uid):
id = encode_base64(f"ProductCart:{pid}")
return client.post(
"/graphql",
json={
"query": """
mutation {
removeProductOfCart(input: {"""
f'id: "{id}", clientMutationId: "{uid}"'
"""})
{
clientMutationId
payload{
id
productId
quantity
price
photos {
url
}
}
}
}
"""
},
)
def get_product(client, pid):
id = encode_base64(f"Product:{pid}")
return client.post(
"/graphql",
json={
"query": """
query {"""
f'product(id: "{id}")'
"""{
id
title
photos {
url
}
price
description
avaliable
avaliability
}
}
"""
},
)
def get_product_cart(client, pid):
id = encode_base64(f"ProductCart:{pid}")
return client.post(
"/graphql",
json={
"query": """
query {"""
f'node(id: "{id}")'
"""{
...on ProductCart {
id
title
productId
quantity
price
photos {
url
}
}
}
}
"""
},
)
def unpack_dict(d):
s = []
for key, val in d.items():
r = (
("{ " + unpack_dict(val) + " }")
if isinstance(d[key], dict)
else f'"{val}"'
)
s.append(f"{key}: {r}")
return ",".join(s)
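# Example (illustrative input, derived from the function above):
#   unpack_dict({"customer": "Ann", "address": {"city": "Natal"}})
#   -> 'customer: "Ann",address: { city: "Natal" }'
# i.e. GraphQL input object syntax with unquoted keys.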
def pay_cart(client, payload, mutation_id):
client_mutation_id = f'clientMutationId: "{mutation_id}"'
params = unpack_dict(payload) + ", " + client_mutation_id
mutation = (
"""
mutation {
payCart(input: {"""
f"{params}"
"""})
{
clientMutationId
payload{
customer
address {
city
country
zipcode
street
number
district
}
totalPaid
productsPaid {
id
productId
title
description
photos {
url
}
price
quantity
}
}
}
}
"""
)
return client.post("/graphql", json={"query": mutation})
```
#### File: zoas-store-api-graphql/tests/test_query_cart.py
```python
from .helpers import api
from .helpers.func import add_fake_cart_products
def test_query_get_cart(client):
# add fake cart with products in database
add_fake_cart_products(client)
# request
response = api.get_cart(client)
# json of response
json = response.get_json()
cart = json["data"]["cart"]
# asserts
assert len(cart) == 2
assert cart[0]["productId"] == 1
assert cart[1]["productId"] == 2
def test_invalid_session(client):
# add fake cart with products in database
add_fake_cart_products(client)
# Setting the invalid session id
with client.session_transaction() as session:
session["u"] = "fake_session"
# request
response = api.get_cart(client)
# json of response
json = response.get_json()
# asserts
assert json["data"]["cart"] is None
assert json["errors"] is not None
assert (
json["errors"][0]["message"] == "The session has expired or is invalid"
)
assert json["errors"][0]["code"] == "INVALID_SESSION"
```
|
{
"source": "jeoygin/gadget",
"score": 3
}
|
#### File: algorithms/rect/rect-area.py
```python
import os, sys
from rects import Rect, rect_area, load_rect_from_file, load_rect_from_path
def main(argv):
if len(argv) > 0:
checkfile(argv[0])
rects = load_rect_from_path(argv[0])
else:
rects = load_rect_from_file(sys.stdin)
print rect_area(rects)
def checkfile(path):
if not os.path.isfile(path):
print 'No such file: {0}'.format(path)
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: caffe/print/print.py
```python
import sys
import yaml
import pprint
def print_model(conf):
pp = pprint.PrettyPrinter()
pp.pprint(conf)
sys.path.insert(0, conf['caffeRoot'] + 'python')
import caffe
net = caffe.Net(conf['proto'], conf['model'], caffe.TEST)
print("\n======================================================")
print("blobs: {}\nparams: {}".format(net.blobs.keys(), net.params.keys()))
print("======================================================")
for param in conf['params']:
print '{}:'.format(param)
print 'weights are {} dimensional'.format(net.params[param][0].data.shape)
shape = net.params[param][0].data.shape
for i in range(shape[0]):
for j in range(shape[1]):
print '{}-{}:'.format(i+1, j+1)
print net.params[param][0].data[i][j]
print '\nbiases are {} dimensional'.format(net.params[param][1].data.shape)
print '{}'.format(net.params[param][1].data)
print("======================================================")
def main(argv):
if (len(argv) < 1):
print 'Usage: ' + sys.argv[0] + ' conf.yml'
sys.exit(1)
with open(argv[0], 'r') as f:
print_model(yaml.load(f))
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: gadget/yaodao/yaodao_dict.py
```python
import os
import sys
import json
import cookielib, urllib2, urllib
import getpass
import argparse
import hashlib
import requests
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
result.status = code
result.headers = headers
return result
fake_header = [
('User-Agent', 'Mozilla/5.0 (Macintosh Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36'),
('Content-Type', 'application/x-www-form-urlencoded'),
('Cache-Control', 'no-cache'),
('Accept', '*/*'),
('Connection', 'Keep-Alive'),
]
class YaoDaoDict(object):
def __init__(self, username, password, cookie_filename='youdao_cookie', local_store='store'):
self.username = username
self.password = password
self.cookie_filename = cookie_filename
self.local_store = local_store
self.cj = cookielib.LWPCookieJar(cookie_filename)
if os.access(cookie_filename, os.F_OK):
self.cj.load(cookie_filename, ignore_discard=True, ignore_expires=True)
self.opener = urllib2.build_opener(
SmartRedirectHandler(),
urllib2.HTTPHandler(debuglevel=0),
urllib2.HTTPSHandler(debuglevel=0),
urllib2.HTTPCookieProcessor(self.cj)
)
self.opener.addheaders = fake_header
def login(self):
self.cj.clear()
self.opener.open('http://account.youdao.com/login?back_url=http://dict.youdao.com&service=dict')
login_data = urllib.urlencode({
'app' : 'web',
'tp' : 'urstoken',
'cf' : '7',
'fr' : '1',
'ru' : 'http://dict.youdao.com',
'product' : 'DICT',
'type' : '1',
'um' : 'true',
'username' : self.username,
'password' : self.password,
'savelogin' : '1',
})
response = self.opener.open('https://logindict.youdao.com/login/acc/login', login_data)
if response.headers.get('Set-Cookie').find(self.username) > -1:
self.cj.save(self.cookie_filename, ignore_discard=True, ignore_expires=True)
return True
else:
return False
def query(self, keyfrom, key, word):
local_file = '{}/{}.json'.format(self.local_store, word.lower())
if os.access(local_file, os.F_OK):
try:
with open(local_file, 'r') as f:
                    data = json.loads(f.read())
if 'basic' in data:
return data
except Exception:
pass
url = 'http://fanyi.youdao.com/openapi.do?keyfrom={}&key={}&type=data&doctype=json&version=1.1&q={}'.format(keyfrom, key, word)
r = requests.get(url)
if r.status_code == 200:
with open(local_file, 'w') as f:
f.write(json.dumps(r.json()))
return r.json()
else:
return None
def add_word(self, word, desc, phonetic, tags):
post_data = urllib.urlencode({
'word' : word,
'phonetic' : unicode(phonetic).encode('utf-8'),
'desc': unicode(desc).encode('utf-8'),
'tags' : unicode(tags).encode('utf-8'),
})
self.opener.addheaders = fake_header + [
('Referer', 'http://dict.youdao.com/wordbook/wordlist'),
]
response = self.opener.open('http://dict.youdao.com/wordbook/wordlist?action=add', post_data)
return response.headers.get('Location') == 'http://dict.youdao.com/wordbook/wordlist'
```
|
{
"source": "jepabe/Demo_earth2",
"score": 2
}
|
#### File: python/ee/_helpers.py
```python
import contextlib
import json
import sys
# pylint: disable=g-importing-member
from . import data
from . import oauth
from .apifunction import ApiFunction
from .ee_exception import EEException
# pylint: enable=g-importing-member
import six
from google.auth import crypt
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
def _GetPersistentCredentials():
"""Read persistent credentials from ~/.config/earthengine.
Raises EEException with helpful explanation if credentials don't exist.
Returns:
OAuth2Credentials built from persistently stored refresh_token
"""
try:
tokens = json.load(open(oauth.get_credentials_path()))
refresh_token = tokens['refresh_token']
return Credentials(
None,
refresh_token=refresh_token,
token_uri=oauth.TOKEN_URI,
client_id=oauth.CLIENT_ID,
client_secret=oauth.CLIENT_SECRET,
scopes=oauth.SCOPES)
except IOError:
raise EEException('Please authorize access to your Earth Engine account '
'by running\n\nearthengine authenticate\n\nin your '
'command line, and then retry.')
def ServiceAccountCredentials(email, key_file=None, key_data=None):
"""Configure OAuth2 credentials for a Google Service Account.
Args:
email: The email address of the account for which to configure credentials.
Ignored if key_file or key_data represents a JSON service account key.
key_file: The path to a file containing the private key associated with
the service account. Both JSON and PEM files are supported.
key_data: Raw key data to use, if key_file is not specified.
Returns:
An OAuth2 credentials object.
"""
# Assume anything that doesn't end in '.pem' is a JSON key.
if key_file and not key_file.endswith('.pem'):
return service_account.Credentials.from_service_account_file(
key_file, scopes=oauth.SCOPES)
# If 'key_data' can be decoded as JSON, it's probably a raw JSON key.
if key_data:
try:
key_data = json.loads(key_data)
return service_account.Credentials.from_service_account_info(
key_data, scopes=oauth.SCOPES)
except ValueError:
# It may actually be a raw PEM string, we'll try that below.
pass
# Probably a PEM key - just read the file into 'key_data'.
if key_file:
with open(key_file, 'r') as file_:
key_data = file_.read()
# Raw PEM key.
signer = crypt.RSASigner.from_string(key_data)
return service_account.Credentials(
signer, email, oauth.TOKEN_URI, scopes=oauth.SCOPES)
def call(func, *args, **kwargs):
"""Invoke the given algorithm with the specified args.
Args:
func: The function to call. Either an ee.Function object or the name of
an API function.
*args: The positional arguments to pass to the function.
**kwargs: The named arguments to pass to the function.
Returns:
A ComputedObject representing the called function. If the signature
specifies a recognized return type, the returned value will be cast
to that type.
"""
if isinstance(func, six.string_types):
func = ApiFunction.lookup(func)
return func.call(*args, **kwargs)
def apply(func, named_args): # pylint: disable=redefined-builtin
"""Call a function with a dictionary of named arguments.
Args:
func: The function to call. Either an ee.Function object or the name of
an API function.
named_args: A dictionary of arguments to the function.
Returns:
A ComputedObject representing the called function. If the signature
specifies a recognized return type, the returned value will be cast
to that type.
"""
if isinstance(func, six.string_types):
func = ApiFunction.lookup(func)
return func.apply(named_args)
@contextlib.contextmanager
def profilePrinting(destination=sys.stderr):
# pylint: disable=g-doc-return-or-yield
"""Returns a context manager that prints a profile of enclosed API calls.
The profile will be printed when the context ends, whether or not any error
occurred within the context.
# Simple example:
with ee.profilePrinting():
print ee.Number(1).add(1).getInfo()
Args:
destination: A file-like object to which the profile text is written.
Defaults to sys.stderr.
"""
# TODO(user): Figure out why ee.Profile.getProfiles isn't generated and fix
# that.
getProfiles = ApiFunction.lookup('Profile.getProfiles')
profile_ids = []
try:
with data.profiling(profile_ids.append):
yield
finally:
profile_text = getProfiles.call(ids=profile_ids).getInfo()
destination.write(profile_text)
```
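A hedged usage sketch for the helpers above, assuming they are re-exported at package level (as `ee.ServiceAccountCredentials` and `ee.call`) the way the Earth Engine client package normally does; the service-account email and key path are placeholders.
```python
# Hypothetical usage; the account email and key file are placeholders.
import ee

credentials = ee.ServiceAccountCredentials(
    'my-service-account@my-project.iam.gserviceaccount.com', key_file='privatekey.json')
ee.Initialize(credentials)

# call() looks up a named server-side algorithm; 'Image.constant' is used here
# purely as a simple illustration.
img = ee.call('Image.constant', 1)
print(img.getInfo())
```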
|
{
"source": "jepayne1138/ModularMailer-ClassifierNetwork",
"score": 2
}
|
#### File: ModularMailer-ClassifierNetwork/classifiernetwork/console.py
```python
import argparse
import sys
import classifiernetwork.defaults as defaults
SUPPORTED_OBJECTIVES = (
'mean_squared_error',
'mse',
'mean_absolute_error',
'mae',
'mean_absolute_percentage_error',
'mape',
'mean_squared_logarithmic_error',
'msle',
'squared_hinge',
'hinge',
'binary_crossentropy',
'categorical_crossentropy',
'sparse_categorical_crossentropy',
'kullback_leibler_divergence',
'kld',
'poisson',
'cosine_proximity',
)
SUPPORTED_ACTIVATIONS = (
'softmax',
'softplus',
'softsign',
'relu',
'tanh',
'sigmoid',
'hard_sigmoid',
'linear',
)
def _build_training_subparser(train_parser):
"""Create the options for the 'train' subparser"""
train_parser.add_argument(
'input_vectors', type=str,
help='Path to the numpy array of input vectors (.npy file).'
)
train_parser.add_argument(
'output_vectors', type=str,
        help='Path to the numpy array of output vectors (.npy file).'
)
train_parser.add_argument(
'save_name', type=str, help='Save trained network file name.'
)
train_parser.add_argument(
'-o', '--output-directory', type=str,
help='Directory for output file. Defaults to input_vectors location.'
)
# Network compilation option
compile_group = train_parser.add_argument_group(
title='Compilation options',
description='Options for the structure of the network.'
)
compile_group.add_argument(
'-i', '--hidden-size', type=int,
help='Size of the hidden layer. Defaults to geometric_mean(in, out).'
)
compile_group.add_argument(
'-a', '--activation', type=str,
default=defaults.ACTIVATION, choices=SUPPORTED_ACTIVATIONS,
help='Activation function for the hidden layer (see Keras docs).'
)
compile_group.add_argument(
'-p', '--dropout', type=float, default=defaults.DROPOUT,
help='Fraction of the input units to drop.'
)
compile_group.add_argument(
'-l', '--loss', type=str,
default=defaults.LOSS, choices=SUPPORTED_OBJECTIVES,
        help='The string identifier of a loss function (see Keras docs).'
)
# Options for the stochastic gradient descent optimizer
sgd_group = train_parser.add_argument_group(
title='Stochastic Gradient Descent optimizer (SGD) options',
description='The network is trained using a SGD optimizer.'
)
sgd_group.add_argument(
'-r', '--learning-rate', type=float, default=defaults.LEARNING_RATE,
help='Learning rate.'
)
sgd_group.add_argument(
'-m', '--momentum', type=float, default=defaults.MOMENTUM,
        help='Momentum for the SGD optimizer.'
)
sgd_group.add_argument(
'-d', '--decay', type=float, default=defaults.DECAY,
help='Learning rate decay over each update.'
)
sgd_group.add_argument(
'-n', '--nesterov', action='store_true',
help='Apply Nesterov momentum to the SGD optimizer.'
)
# Options for training the model
train_group = train_parser.add_argument_group(
title='Training options',
description='Options for how the network is to be trained.'
)
train_group.add_argument(
'-e', '--epochs', type=int, default=defaults.EPOCH,
help='The number of epochs to train the model.'
)
train_group.add_argument(
'-s', '--validation-split', type=float,
help='Fraction of the data to use as held-out validation data.'
)
train_group.add_argument(
        '-v', '--verbose', type=int,
default=defaults.VERBOSE, choices=(0, 1, 2),
help='0 for no logging, 1 for progress bar, 2 for line per epoch.'
)
train_group.add_argument(
'-b', '--batch-size', type=int,
help='Number of samples per gradient update.'
)
def argument_parser(args):
parser = argparse.ArgumentParser(
description='Trains neural networks from labeled input data.'
)
# Create subparser
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
# Parse 'train' command
train_parser = subparsers.add_parser(
'train', help='Train a neural network from the given input.'
)
_build_training_subparser(train_parser)
# Return parsed arguments
return parser.parse_args(args)
def main():
"""Entry point for the console script usage of this package.
Returns:
int: Error return code.
"""
args = argument_parser(sys.argv[1:])
return 0
```
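A hedged sketch of exercising the parser above directly; the `.npy` paths and output name are placeholders, and the defaults come from the package's `defaults` module as imported above.
```python
# Hypothetical invocation of the 'train' subcommand defined above.
from classifiernetwork.console import argument_parser

args = argument_parser([
    'train', 'inputs.npy', 'outputs.npy', 'model_out',
    '--epochs', '50', '--activation', 'relu', '--dropout', '0.2',
])
print(args.command, args.epochs, args.activation, args.dropout)
```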
|
{
"source": "jepayne1138/ModularMailer-PluginBase",
"score": 2
}
|
#### File: ModularMailer-PluginBase/mmpluginbase/plugin_base.py
```python
import abc
class FileDriverBase(abc.ABC):
def onLoad(self):
pass
@abc.abstractmethod
def read(self, filename, **kwargs):
pass
```
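Since `FileDriverBase` only requires `read`, a concrete plugin driver can be very small. A hedged sketch follows; the CSV handling is purely illustrative and not part of the package.
```python
import csv

from mmpluginbase.plugin_base import FileDriverBase


class CsvFileDriver(FileDriverBase):
    """Illustrative driver that reads a CSV file into a list of rows."""

    def read(self, filename, **kwargs):
        with open(filename, newline='') as f:
            return list(csv.reader(f, **kwargs))


# driver = CsvFileDriver()
# driver.onLoad()
# rows = driver.read('contacts.csv')  # 'contacts.csv' is a placeholder path
```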
|
{
"source": "jepayne1138/PTCAccount",
"score": 3
}
|
#### File: PTCAccount/ptcaccount/accountcreator.py
```python
from six.moves import range
import random
import string
# urllib imports supporting Python 2 and 3
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2
from urllib import urlencode
import requests
from ptcaccount.ptcexceptions import *
__all__ = [
'create_account',
'random_account',
'PROVIDER',
'USERNAME',
'PASSWORD',
'EMAIL'
]
# Constants defining the keys of the returned account dictionary
PROVIDER = 'provider'
USERNAME = 'username'
PASSWORD = 'password'
EMAIL = 'email'
_PTC_PROVIDER = 'ptc' # Account provider (APIs take 'ptc' or 'google')
# The base URL for Pokemon Trainer Club
_BASE_URL = 'https://club.pokemon.com/us/pokemon-trainer-club'
# Account creation validation is done by checking the response URLs
# The following are control flow URL constants
_SUCCESS_DESTS = (
'https://club.pokemon.com/us/pokemon-trainer-club/parents/email', # This initially seemed to be the proper success redirect
'https://club.pokemon.com/us/pokemon-trainer-club/sign-up/', # but experimentally it now seems to return to the sign-up, but still registers
)
# As both seem to work, we'll check against both success destinations until I have I better idea for how to check success
_DUPE_EMAIL_DEST = 'https://club.pokemon.com/us/pokemon-trainer-club/forgot-password?msg=users.email.exists'
_BAD_DATA_DEST = 'https://club.pokemon.com/us/pokemon-trainer-club/parents/sign-up'
class PTCSession(requests.Session):
""""A Session subclass handling creating, sending, & validating requests
A likely unnecessary subclass of requests.Session, but I thought it
helped to clean up the code.
"""
def request(self, url, headers=None, data=None, resp_code=None, **kwargs):
"""
Creates, sends, and validates a request for this session.
If data parameter is provided, the request will be POST, otherwise
a GET request is sent
If a specific response status code is
expected, set the resp_code parameter and the status code of the
response will be validated after sending the request. If the status
        code doesn't match, an exception is raised.
Args:
url (str): URL to send.
headers (dict, optional): Headers to send. Defaults to {}.
data (dict, optional): Data for a POST request. Defaults to {}.
resp_code (int, optional): Check if this status code was returned
upon receiving a response. If no desired code is given, no
check will be made to validate the response status_code.
Defaults to None.
**kwargs: Keyword arguments passed to the Request object.
Returns:
requests.Response: The Response object for the sent request.
Raises:
PTCInvalidStatusCodeException: If a desired response code was
provided (resp_code), raise this exception if the actual
                response status code does not match the desired code.
"""
# Set headers to an empty dict if no argument provided
headers = {} if headers is None else headers
# Encode the data dict if provided
if isinstance(data, dict):
data = urlencode(data, doseq=True)
# If data provided, the request must be a POST method
method = 'POST' if data else 'GET'
# Create, prepare, and send the request
req = requests.Request(method, url, data=data, **kwargs)
prepped = self.prepare_request(req)
prepped.headers.update(headers)
resp = self.send(prepped)
# Validate the status_code if a desired code was given
if resp_code is not None and resp.status_code != resp_code:
raise PTCInvalidStatusCodeException(str(resp.status_code))
# Return the Response object
return resp
def _random_string(length=15):
"""Generate a random alpha-numeric string of the given length
Args:
length (int, optional): Length of the string to randomly generate.
Defaults to 15.
Returns:
        str: String of the desired length consisting of upper, lower, and
numeric characters.
"""
return ''.join(
[random.choice(string.ascii_letters + string.digits) for _ in range(length)]
)
def _random_email(local_length=10, sub_domain_length=5, top_domain='.com'):
"""Generate a random email-like string
Generates a random email-like string (i.e. [email protected]).
The length of both the local section and sub-domain section can be
modified, and a different top-level domain can be set.
Args:
local_length (int, optional): Length of the local portion of the fake
email. Defaults to 10.
sub_domain_length (int, optional): Length of the sub-domain portion of
the fake email. Defaults to 5.
top_domain (str, optional): String to append to the end of the fake
email as the top-level domain. Defaults to '.com'
Returns:
str: Random email-like string.
"""
return '{local}@{sub_domain}{top_domain}'.format(
local=_random_string(local_length),
sub_domain=_random_string(sub_domain_length),
top_domain=top_domain,
)
def _validate_password(password):
"""Validates that the password can be used to create a PTC account
As currently the only requirement I am aware of is a length restriction,
this only checks that the give password string is between 6 and 15
characters long. If I determine any other restrictions, they can be
added here later.
Args:
password (str, optional): Password to be validated.
Returns:
bool: True if the password is valid. (Does not return false, rather
raise exception with description of invalid nature.)
Raises:
PTCInvalidPasswordException: If the given password is not a valid
password that can be used to make an account. (Currently just
validates length, so this means the given password was not between
6 and 15 characters long.)
"""
# Check that password length is between 6 and 15 characters long
if len(password) < 6 or len(password) > 15:
raise PTCInvalidPasswordException('Password must be between 6 and 15 characters.')
return True
def _tag_email(email_address, tag):
"""Add a plus sign and the tag before the first at sign in the email
Args:
email_address (str): Email address tag is to be added to.
tag (str): Tag to add after the plus sign before first at sign.
Returns:
str: Email with the tag added.
"""
return email_address.replace('@', '+{}@'.format(tag), 1)
def create_account(username, password, email):
"""Creates a new Pokemon Trainer Club account
Creates a new PTC account with the given username, password and email.
Currently sets the following account settings:
- Date of birth: 1970-01-01
- Country: US
- Public profile: False
- Screen name: ''
Args:
username (str): Username for the PTC account
password (str): Password for the PTC account
        email (str): Email for the PTC account
Returns:
bool: True if the account was successfully created. (Should not ever
return false, rather raise exceptions detailing type of failure.)
Raises:
PTCInvalidNameException: If the given username is already in use.
PTCInvalidPasswordException: If the given password is not a valid
password that can be used to make an account. (Currently just
validates length, so this means the given password was not between
6 and 15 characters long.)
PTCInvalidEmailException: If the given email was either in an invalid
format (i.e. not [email protected]) or the email is already
registered to an existing account.
PTCInvalidStatusCodeException: If an invalid status code was received
at any time. (Server or underlying code issue; try again and submit
            a bug report on continued failure if creation works in a browser.)
"""
# Validate a user given password
if password is not None:
_validate_password(password)
# Set up the session
session = PTCSession()
successfully_created = False
while not successfully_created:
try:
# (Emulates navigating to the sign-up age verification page)
session.request(
url='{base_url}/parents/sign-up'.format(base_url=_BASE_URL),
headers={ # No headers required
'Host': 'club.pokemon.com',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch, br',
'Accept-Language': 'en-US,en;q=0.8',
},
resp_code=200
)
# Post request submitting date of birth and country
session.request(
url='{base_url}/sign-up/'.format(base_url=_BASE_URL),
headers={ # Content-Type and Referer headers are required
'Host': 'club.pokemon.com',
'Cache-Control': 'max-age=0',
'Origin': 'https://club.pokemon.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': '{base_url}/sign-up/'.format(base_url=_BASE_URL),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8'
},
data={
'csrfmiddlewaretoken': session.cookies.get_dict()['csrftoken'],
'dob': '1970-01-01',
'country': 'US',
},
resp_code=200
)
# Post request submitting account information
resp = session.request(
url='{base_url}/parents/sign-up'.format(base_url=_BASE_URL),
headers={ # Content-Type and Referer headers are required
'Host': 'club.pokemon.com',
'Cache-Control': 'max-age=0',
'Origin': 'https://club.pokemon.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Referer': 'https://club.pokemon.com/us/pokemon-trainer-club/parents/sign-up',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.8'
},
data={
'csrfmiddlewaretoken': session.cookies.get_dict()['csrftoken'],
'username': username,
'password': password,
'confirm_password': password,
'email': email,
'confirm_email': email,
'public_profile_opt_in': 'False',
'screen_name': '',
'terms': 'on',
},
resp_code=200
)
# Indicates that we created the account and can exit the retry loop
successfully_created = True
except PTCInvalidStatusCodeException:
print("[-] Received invalid status code, retrying ...")
# Validate response
return _validate_response(resp)
def _validate_response(resp):
"""Validate final request response to determine if account was created
Args:
resp (requests.Response): Response instance from sending a requests
Returns:
bool: True if the account was successfully created. (Should not ever
return false, rather raise exceptions detailing type of failure.)
Raises:
PTCInvalidNameException: If the given username is already in use.
PTCInvalidPasswordException: If the given password is not a valid
password that can be used to make an account. (Currently just
validates length, so this means the given password was not between
6 and 15 characters long.)
PTCInvalidEmailException: If the given email was either in an invalid
format (i.e. not <EMAIL>) or the email is already
registered to an existing account.
PTCInvalidStatusCodeException: If an invalid status code was received
at any time. (Server or underlying code issue; try again and submit
            a bug report on continued failure if creation works in a browser.)
"""
if resp.url in _SUCCESS_DESTS:
return True
elif resp.url == _DUPE_EMAIL_DEST:
raise PTCInvalidEmailException('Email already in use.')
elif resp.url == _BAD_DATA_DEST:
if 'Enter a valid email address.' in resp.text:
raise PTCInvalidEmailException('Invalid email.')
else:
raise PTCInvalidNameException('Username already in use.')
else:
raise PTCException('Generic failure. User was not created.')
return False # Should never hit here
def random_account(username=None, password=None, email=None, email_tag=False):
"""Crate a random Pokemon Trainer Club account
Creates a new account with random username, password, and email.
If any of those parameters are given, use them instead of creating
a random replacement.
If a password is given, it must be valid, and an exception will be
raised if the password is not acceptable.
New random strings will be generated for username and email on a failure
so that eventually a new account will be successfully created. However,
if a specific username or email was given and account creation fails,
a new string will not be generated as it assumes the user wanted to use
    that specific value. Instead, an exception is raised indicating the
reason for account creation failure.
Args:
username (str, optional): Specific username for the new account.
Defaults to a random alpha-numeric string.
password (str, optional): Specific password for the new account.
Defaults to a random alpha-numeric string.
email (str, optional): Specific email for the new account. Defaults
to a randomly generated email-like string.
email_tag (bool, optional): The username should be added as a tag
to the email address. Defaults to False.
Returns:
Dict[str, str]: A dict of the new account information, containing the
provider ('ptc'), username, password, and email. Access using the
exposed constants PROVIDER, USERNAME, PASSWORD, and EMAIL.
Raises:
PTCInvalidNameException: If the given username is already in use.
PTCInvalidPasswordException: If the given password is not a valid
password that can be used to make an account. (Currently just
validates length, so this means the given password was not between
6 and 15 characters long.)
PTCInvalidEmailException: If the given email was either in an invalid
format (i.e. not <EMAIL>) or the email is already
registered to an existing account.
PTCInvalidStatusCodeException: If an invalid status code was received
at any time. (Server or underlying code issue; try again and submit
            a bug report on continued failure if creation works in a browser.)
"""
try_username = _random_string() if username is None else str(username)
password = _random_string() if password is None else str(password)
try_email = _random_email() if email is None else str(email)
account_created = False
while not account_created:
        # Add tag in loop so that it is updated if email or username changes
if email_tag:
try_email = _tag_email(try_email, try_username)
# Attempt to create the new account
try:
account_created = create_account(
try_username, password, try_email
)
except PTCInvalidNameException:
# If no username was provided, create new username and try again
if username is None:
try_username = _random_string()
else:
# If username was provided, re-raise the exception for bad name
raise
except PTCInvalidEmailException:
if email is None:
try_email = _random_email()
elif email_tag and username is None:
# If the bad email has a tag of a random username,
# re-generate a new username and try again
try_username = _random_string()
else:
# If email was provided, re-raise the exception for bad email
raise
# Return the username, password, and email of the new account
return {
PROVIDER: _PTC_PROVIDER,
USERNAME: try_username,
PASSWORD: password,
EMAIL: try_email,
}
```
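A hedged usage sketch of the module above; it talks to the live PTC sign-up endpoints, so it only works while those URLs still behave as the code expects.
```python
from ptcaccount.accountcreator import random_account, USERNAME, PASSWORD, EMAIL

# Fully random account; the helper retries internally on name/email collisions.
account = random_account(email_tag=True)
print(account[USERNAME], account[PASSWORD], account[EMAIL])
```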
|
{
"source": "jepayne1138/TownMapServer",
"score": 3
}
|
#### File: TownMapServer/townmapserver/console.py
```python
import sys
import argparse
import townmapserver.database as database
import townmapserver.server as server
def build_run_parser(subparsers):
run_parser = subparsers.add_parser(
'run', help='Run the server.'
)
run_parser.add_argument(
'-a', '--address', type=str, default='localhost',
help='Host address for running the server'
)
run_parser.add_argument(
'-p', '--port', type=int, default=5000,
help='Port for running the server'
)
run_parser.add_argument(
'-d', '--debug', action='store_true',
help='Runs server in debug mode.'
)
return run_parser
def build_database_parser(subparsers):
db_parser = subparsers.add_parser(
'database', help='Manage the database.'
)
db_subparser = db_parser.add_subparsers(dest='subcommand')
db_subparser.required = True
# Create database subcommand
db_create_parser = db_subparser.add_parser(
'create', help='Create the database schema from the defined models.'
)
db_create_parser.add_argument(
'username', type=str,
help='Name of Postgres login role with create permissions.'
)
db_create_parser.add_argument(
'password', type=str,
help='Password of Postgres login role with create permissions.'
)
db_connection_uri_parser = db_subparser.add_parser(
'uri', help='Display the connection URI'
)
return db_parser
def parse_arguments(args):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Manages the backend server API for the Town Map app'
)
subparsers = parser.add_subparsers(dest='command')
subparsers.required = True
# Build command parsers
build_run_parser(subparsers)
build_database_parser(subparsers)
return parser.parse_args(args)
def handle_command_database(args):
if args.subcommand == 'create':
database.create_schema(server.flaskApp, args.username, args.password)
if args.subcommand == 'uri':
print(database.build_connection_uri())
def handle_command_run(args):
server.launch_server(args.address, args.port, args.debug)
def handle_command(args):
globals()['handle_command_{}'.format(args.command)](args)
# launch_server(args.address, args.port)
def main():
args = parse_arguments(sys.argv[1:])
handle_command(args)
```
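A hedged sketch of driving the parser above directly; the host and port values are arbitrary.
```python
from townmapserver.console import parse_arguments, handle_command

args = parse_arguments(['run', '-a', '0.0.0.0', '-p', '8080', '--debug'])
print(args.command, args.address, args.port, args.debug)
# handle_command(args)  # would start the Flask server via handle_command_run
```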
#### File: TownMapServer/townmapserver/database.py
```python
import time
import os
import flask_sqlalchemy
# To use, we need to create a settings.py modules in the same directory with
# the following variables (USER, PASSWORD, HOST, PORT, DATABASE)
db = flask_sqlalchemy.SQLAlchemy()
class User(db.Model):
userId = db.Column(db.Integer, primary_key=True)
googleId = db.Column(db.String(32), unique=True) # TODO: Check length
trainerName = db.Column(db.String(32), unique=True)
trainerLevel = db.Column(db.Integer)
def __init__(self, googleId, trainerName, trainerLevel=0):
self.googleId = googleId
self.trainerName = trainerName
self.trainerLevel = trainerLevel
# Database table definition
class Catch(db.Model):
catchId = db.Column(db.Integer, primary_key=True)
userId = db.Column(db.Integer, db.ForeignKey('user.userId'))
user = db.relationship('User', backref=db.backref('catches', lazy='joined'))
usingLure = db.Column(db.Boolean)
usingIncense = db.Column(db.Boolean)
creatureId = db.Column(db.Integer)
latitude = db.Column(db.Float)
longitude = db.Column(db.Float)
catchTime = db.Column(db.Float) # Store as Unix time
def __init__(
self, user, creatureId, latitude, longitude,
usingLure=False, usingIncense=False, catchTime=None):
self.user = user
self.creatureId = creatureId
self.usingLure = usingLure
self.usingIncense = usingIncense
self.latitude = latitude
self.longitude = longitude
if catchTime is None:
catchTime = time.time()
self.catchTime = catchTime
def initialize_app(app):
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL']
# Disable Flask-SQLAlchemy tracking of modification to objects
# See: http://flask-sqlalchemy.pocoo.org/2.1/config/
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
with app.app_context():
db.init_app(app)
def create_schema(app):
initialize_app(app)
with app.app_context():
db.create_all()
```
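A hedged sketch of recording a catch with the models above; it assumes a Postgres `DATABASE_URL` is set in the environment (as `initialize_app` requires), and the IDs and coordinates are placeholders.
```python
import flask

from townmapserver.database import db, User, Catch, initialize_app

app = flask.Flask(__name__)
initialize_app(app)  # reads DATABASE_URL from the environment

with app.app_context():
    user = User(googleId='1234567890', trainerName='Red', trainerLevel=12)
    catch = Catch(user, creatureId=25, latitude=40.7128, longitude=-74.0060)
    db.session.add(user)
    db.session.add(catch)
    db.session.commit()
```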
#### File: TownMapServer/townmapserver/server.py
```python
import flask
import flask_restful as restful
import townmapserver.resources as resources
import townmapserver.database as database
flaskApp = flask.Flask(__name__)
def add_resources(api, resource_list):
"""Add all resources to the api in the resource list.
All objects in the resource_list must implement the ResourceBase abc.
"""
for resource in resource_list:
api.add_resource(
resource, *resource.urls(), endpoint=resource.endpoint()
)
def launch_server(address, port, debug=False):
# Initialize the Flask application database
database.initialize_app(flaskApp)
api = restful.Api(flaskApp)
add_resources(api, resources.ResourceBase.__subclasses__())
# Run the server
flaskApp.run(host=address, port=port, debug=debug)
```
|
{
"source": "jepcor97/agnpy",
"score": 3
}
|
#### File: agnpy/constraints/spectral_constraints.py
```python
import numpy as np
import astropy.units as u
from astropy.constants import e, sigma_T
from ..utils.conversion import mec2
e = e.gauss
__all__ = ["SpectralConstraints"]
class SpectralConstraints:
r""" Class to describe the self-consistency constraints on the electron energy distribution
Parameters
----------
blob : :class: `Blob` emission region
"""
def __init__(self, blob):
self.blob = blob
@property
def gamma_max_larmor(self):
r"""maximum Lorentz factor of electrons that have their Larmour radius
smaller than the blob radius: :math:`R_L < R_b`.
The Larmor frequency and radius in Gaussian units read
.. math::
\omega_L &= \frac{eB}{\gamma m_e c} \\
R_L &= \frac{v}{\omega_L} = \frac{\gamma m_e v c}{e B} \approx \frac{\gamma m_e c^2}{e B}
therefore
.. math::
R_L < R_b \Rightarrow \gamma_{\mathrm{max}} < \frac{R_b e B}{m_e c^2}
"""
return (self.blob.R_b * e * self.blob.B_cgs / mec2).to_value("")
@property
def gamma_max_ballistic(self):
r"""Naive estimation of maximum Lorentz factor of electrons comparing
acceleration time scale with ballistic time scale.
For the latter we assume that the particles crosses the blob radius.
.. math::
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} &= \xi c E / R_L \\
T_{\mathrm{acc}} &= E \,/\,(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} = R_L / (\xi c) \\
T_{\mathrm{bal}} &= R_b / c \\
T_{\mathrm{acc}} &< T_{\mathrm{bal}}
\Rightarrow \gamma_{\mathrm{max}} < \frac{\xi R_b e B}{m_e c^2}
"""
return self.blob.xi * self.gamma_max_larmor
@property
def gamma_max_synch(self):
r"""Simple estimation of maximum Lorentz factor of electrons
comparing the acceleration time scale with the synchrotron energy loss
.. math::
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} &= \xi c E / R_L \\
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{synch}} &= 4 / 3 \sigma_T c U_B \gamma^2 \\
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} &= (\mathrm{d}E/\mathrm{d}t)_{\mathrm{synch}}
\Rightarrow \gamma_{\mathrm{max}} < \sqrt{\frac{6 \pi \xi e}{\sigma_T B}}
"""
return np.sqrt(
6 * np.pi * self.blob.xi * e / (sigma_T * self.blob.B_cgs)
).to_value("")
@property
def gamma_max_SSC(self):
r"""Simple estimation of maximum Lorentz factor of electrons
comparing the acceleration time scale with the SSC energy loss (in Thomson range)
WARNING: the highest energy electrons will most often scatter in Klein-Nishina range instead
.. math::
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} &= \xi c E / R_L \\
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{SSC}} &= 4 / 3 \sigma_T c U_{\mathrm{synch}} \gamma^2 \\
(\mathrm{d}E/\mathrm{d}t)_{\mathrm{acc}} &= (\mathrm{d}E/\mathrm{d}t)_{\mathrm{SSC}}
            \Rightarrow \gamma_{\mathrm{max}} < \sqrt{\frac{3 \xi e B}{4 \sigma_T U_{\mathrm{SSC}}}}
"""
return np.sqrt(
3
* self.blob.xi
* e
* self.blob.B_cgs
/ (4 * sigma_T * self.blob.u_ph_synch)
).to_value("")
def gamma_max_EC_DT(self, dt, r=0 * u.cm):
r"""Simple estimation of maximum Lorentz factor of electrons comparing the acceleration time scale
with the EC energy loss (in Thomson range, see B&G 1970), like in gamma_max_SSC
WARNING: assumes Thomson regime
.. math::
            \gamma_{\mathrm{max}} = \sqrt{\frac{3 \xi e B}{4 \sigma_T U'_{\mathrm{ext}}}}
"""
return np.sqrt(
3 * self.blob.xi * e * self.blob.B_cgs / (4 * sigma_T * dt.u(r, self.blob))
).to_value("")
@property
def gamma_break_synch(self):
r"""Simple estimation of the cooling break of electrons comparing
synchrotron cooling time scale with the ballistic time scale:
.. math::
T_{\mathrm{synch}} &= E\,/\,(\mathrm{d}E/\mathrm{d}t)_{\mathrm{synch}}
= 3 m_e c^2 / (4 \sigma_T U_B \gamma) \\
T_{\mathrm{bal}} &= R_b / c \\
T_{\mathrm{synch}} &= T_{\mathrm{bal}} \Rightarrow \gamma_b = 6 \pi m_e c^2 / \sigma_T B^2 R_b
"""
gamma_max = (
(
6
* np.pi
* mec2
/ (sigma_T * np.power(self.blob.B_cgs, 2) * self.blob.R_b)
)
.to("")
.value
)
return gamma_max
@property
def gamma_break_SSC(self):
r"""Simple estimation of the cooling break of electrons comparing
SSC time scale (see B&G 1970) with the ballistic time scale:
WARNING: only applicable in Thomson regime
.. math::
T_{\mathrm{SSC}} &= E\,/\,(\mathrm{d}E/\mathrm{d}t)_{\mathrm{SSC}}
= 3 m_e c^2 / (4 \sigma_T U_{\mathrm{SSC}} \gamma) \\
T_{\mathrm{bal}} &= R_b / c \\
T_{\mathrm{SSC}} &= T_{\mathrm{bal}} \Rightarrow \gamma_b = 3 m_e c^2 / 4 \sigma_T U_{\mathrm{SSC}} R_b
"""
return (
(3 * mec2 / (4 * sigma_T * self.blob.u_ph_synch * self.blob.R_b))
.to("")
.value
)
def gamma_break_EC_DT(self, dt, r=0 * u.cm):
r"""Simple estimation of the cooling break of electrons comparing
EC time scale (see B&G 1970) with the ballistic time scale, like in gamma_break_SSC
WARNING: assumes Thomson regime
.. math::
\gamma_b = 3 m_e c^2 / 4 \sigma_T U'_{\mathrm{ext}} R_b
"""
# u_ext=np.power(self.Gamma,2) * np.power(1-mu*self.Beta,2) * dt.xi_dt*dt.L_disk/(4*np.pi*np.power(d,2) * c)
return (
(3 * mec2 / (4 * sigma_T * dt.u(r, self.blob) * self.blob.R_b)).to("").value
)
```
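For a rough feel of the numbers, here is a hedged back-of-the-envelope check of a few of the limits above using plain CGS constants (no astropy units and no `Blob`); the blob radius, magnetic field, and acceleration efficiency are hypothetical.
```python
import numpy as np

e_esu = 4.803e-10          # electron charge [esu]
mec2_erg = 8.187e-7        # electron rest energy m_e c^2 [erg]
sigma_T_cm2 = 6.652e-25    # Thomson cross section [cm^2]

R_b = 1e16                 # hypothetical blob radius [cm]
B = 1.0                    # hypothetical magnetic field [G]
xi = 1.0                   # hypothetical acceleration efficiency

gamma_max_larmor = R_b * e_esu * B / mec2_erg          # ~5.9e12 for these values
gamma_max_ballistic = xi * gamma_max_larmor            # ~5.9e12 for xi = 1
gamma_max_synch = np.sqrt(6 * np.pi * xi * e_esu / (sigma_T_cm2 * B))  # ~1.2e8

print(f"gamma_max_larmor    ~ {gamma_max_larmor:.2e}")
print(f"gamma_max_ballistic ~ {gamma_max_ballistic:.2e}")
print(f"gamma_max_synch     ~ {gamma_max_synch:.2e}")
```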
|
{
"source": "jepebe/fractal",
"score": 2
}
|
#### File: jepebe/fractal/fractal.py
```python
import time
import numpy as np
import pxng
from pxng.keys import *
from fractal import Range2D
from fractal import py_create_fractal
from fractal.shader_fractal import ShaderFractal
from fractal.term_fractal import term_fractal
from fractal._fractal import create_fractal as rust_create_fractal
def handle_input(window, context):
context['mouse'] = window.mouse.x, window.mouse.y
context['mouse_delta'] = window.mouse.dx, window.mouse.dy
if window.mouse.hover and window.mouse.button_left.held:
context['panning'] = True
context['dirty'] = True
elif window.mouse.hover and window.mouse.button_left.released:
context['panning'] = False
context['dirty'] = True
if window.key_state(KEY_SPACE).pressed:
context['iterations'] = 128
context['world'] = WorldSpace(0, 0, window.width, window.height, -2, -1, 1, 1)
context['dirty'] = True
if window.key_state(KEY_Q).pressed:
window.close_window()
if window.key_state(KEY_1).pressed:
context['method'] = 'Rust'
context['dirty'] = True
if window.key_state(KEY_2).pressed:
context['method'] = 'Python'
context['dirty'] = True
if window.key_state(KEY_3).pressed:
context['method'] = 'Shader'
context['dirty'] = True
if window.key_state(KEY_T).pressed:
iterations = context['iterations']
frac = calculate_world_view(window)
term_fractal(iterations=iterations, fra=frac)
if window.key_state(KEY_F).pressed:
frac = calculate_world_view(window)
print(f'dx={frac[2] - frac[0]} dy={frac[3] - frac[1]}')
if window.key_state(KEY_I).pressed:
iterations = context['iterations']
iterations += 64 if iterations >= 64 else 16
iterations = min(2048, iterations)
context['iterations'] = iterations
context['dirty'] = True
if window.key_state(KEY_J).pressed:
iterations = context['iterations']
iterations -= 64 if iterations >= 128 else 16
iterations = max(16, iterations)
context['iterations'] = iterations
context['dirty'] = True
scroll_dy = window.mouse.scroll_dy
if window.key_state(KEY_Z).held or scroll_dy > 0:
context['world'].zoom(1.1)
context['dirty'] = True
if window.key_state(KEY_X).held or scroll_dy < 0:
context['world'].zoom(0.9)
context['dirty'] = True
def update(window: pxng.Window):
handle_input(window, window.context)
if window.context['dirty']:
render_fractal(window)
if window.context['method'] == 'Shader':
now = time.time()
# Since we are not rendering to texture we need to draw every frame
iterations = window.context['iterations']
frac = calculate_world_view(window)
pix = Range2D(0, 0, w, h)
sf: ShaderFractal = window.context['shaderfractal']
sf.create_fractal(window._spaces, pix, frac, iterations=iterations)
window.context['rendering_time'] = time.time() - now
else:
sprite = window.context['sprite']
window.draw_sprite(0, 0, sprite, scale=1)
window.draw_text(0, 0, 'Fractal Renderer', scale=2)
rendering_time = window.context["rendering_time"]
iterations = window.context["iterations"]
window.draw_text(0, 18, f'Iterations: {iterations}')
window.draw_text(0, 28, f'Time Taken: {rendering_time :.04f} s.')
method = window.context['method']
window.draw_text(0, 36, f'Method: {method}')
class WorldSpace:
def __init__(self, sx1, sy1, sx2, sy2, wx1, wy1, wx2, wy2):
self.sx1 = sx1
self.sy1 = sy1
self.sx2 = sx2
self.sy2 = sy2
self.wx1 = wx1
self.wy1 = wy1
self.wx2 = wx2
self.wy2 = wy2
self._x_scale = (self.sx2 - self.sx1) / (self.wx2 - self.wx1)
self._y_scale = (self.sy2 - self.sy1) / (self.wy2 - self.wy1)
self._offset_x = self.wx1
self._offset_y = self.wy1
self._zoom = 1
def screen_to_world(self, x1, y1):
x = x1 / self._x_scale + self._offset_x
y = y1 / self._y_scale + self._offset_y
return x, y
def screen_to_world_units(self, dx, dy):
x = dx / self._x_scale
y = dy / self._y_scale
return x, y
def apply_zoom(self):
self._x_scale *= self._zoom
self._y_scale *= self._zoom
self._zoom = 1
def zoom(self, zoom_factor):
self._zoom *= zoom_factor
def adjust_offset(self, dx, dy):
self._offset_x += dx
self._offset_y += dy
def render_fractal(window: pxng.Window):
iterations = window.context['iterations']
sprite = window.context['sprite']
frac = calculate_world_view(window)
now = time.time()
pix = Range2D(0, 0, w, h)
if window.context['method'] == 'Rust':
rust_create_fractal(pix, frac, iterations=iterations, data=sprite._data)
sprite.update()
elif window.context['method'] == 'Python':
py_create_fractal(pix, frac, iterations=iterations, data=sprite)
window.context['rendering_time'] = time.time() - now
window.context['dirty'] = False
def calculate_world_view(window: pxng.Window):
world: WorldSpace = window.context['world']
if window.context['panning']:
dx, dy = window.context['mouse_delta']
dx, dy = world.screen_to_world_units(dx, dy)
world.adjust_offset(dx, dy)
mx, my = window.context['mouse']
pzmx, pzmy = world.screen_to_world(mx, my)
world.apply_zoom()
zmx, zmy = world.screen_to_world(mx, my)
world.adjust_offset(pzmx - zmx, pzmy - zmy)
fx1, fy1 = world.screen_to_world(0, 0)
fx2, fy2 = world.screen_to_world(window.width, window.height)
return Range2D(fx1, fy1, fx2, fy2)
if __name__ == "__main__":
w = 1280
h = 720
sprite = pxng.Sprite(np.zeros((h, w, 3), dtype=np.uint8))
window = pxng.Window(w, h, 'Fractal', scale=1)
window.context = {
'sprite': sprite,
'count': 0,
'dirty': True,
'iterations': 128,
'rendering_time': 0,
'method': 'Shader', # Options: Rust, Python, Shader
'world': WorldSpace(0, 0, w, h, -2, -1, 1, 1),
'panning': False,
'mouse': (0, 0),
'mouse_delta': (0, 0),
'shaderfractal': ShaderFractal()
}
render_fractal(window)
window.set_update_handler(update)
window.start_event_loop()
```
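A small, hedged illustration of the `WorldSpace` mapping above (assuming the class is in scope, e.g. run inside this module): screen pixels are mapped onto the complex-plane rectangle used by the main block, and zooming rescales around the current offset.
```python
ws = WorldSpace(0, 0, 1280, 720, -2, -1, 1, 1)
print(ws.screen_to_world(0, 0))        # (-2.0, -1.0): upper-left corner of the view
print(ws.screen_to_world(1280, 720))   # (1.0, 1.0): lower-right corner of the view

ws.zoom(2.0)
ws.apply_zoom()
print(ws.screen_to_world(1280, 720))   # (-0.5, 0.0): the visible window is now half as wide
```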
|
{
"source": "jepebe/pixelengine",
"score": 3
}
|
#### File: pixelengine/examples/animated_sprites.py
```python
import pxng
from pxng.colors import LIGHT_GREEN, DARK_GREY
from pxng.keys import KEY_SPACE, KEY_Q
def update(window: pxng.Window):
handle_input(window)
paused = window.context['paused']
window.draw_grid(size=5, tint=(0.125, 0.125, 0.125), gap_size=1, dash_size=1)
window.draw_grid(size=20, tint=DARK_GREY)
window.draw_text(5, 5, "Animated Sprites", tint=LIGHT_GREEN)
flame_sprite = window.context['flame_sprite']
if not paused:
flame_sprite.advance_time(window.elapsed_time)
window.draw_sprite(250, 10, flame_sprite, 0.5)
fire_sprite = window.context['fire_sprite']
if not paused:
fire_sprite.advance_time(window.elapsed_time)
window.draw_sprite(200, 240 - 128, fire_sprite, 1)
window.draw_sprite(30, 240 - 64, fire_sprite, 0.5)
fire2_sprite = window.context['fire2_sprite']
if not paused:
fire2_sprite.advance_time(window.elapsed_time)
window.draw_sprite(105, 240 - 128, fire2_sprite, 1)
pale_blue = window.context['pale_blue']
if not paused:
pale_blue.advance_time_all(window.elapsed_time)
pale_blue.set_current_animation('walk_left')
window.draw_sprite(70, 40, pale_blue, scale=0.5)
window.draw_sprite(90, 40, pale_blue, scale=1)
window.draw_sprite(120, 40, pale_blue, scale=2)
window.draw_sprite(170, 40, pale_blue, scale=3)
x, y = window.context['big_pos']
pale_blue.set_current_animation('run_right')
window.draw_sprite(0-x, 40 + y, pale_blue, scale=4)
pale_blue.set_current_animation('shoot_right')
window.draw_sprite(120, 200, pale_blue)
pale_blue.set_current_animation('shoot_left')
window.draw_sprite(170, 200, pale_blue)
pale_blue.set_current_animation('idle_right')
window.draw_sprite(0, 200, pale_blue)
pale_blue.set_current_animation('idle_left')
window.draw_sprite(70, 200, pale_blue)
if not window.context['paused']:
window.context['frame'] += 1
def handle_input(window: pxng.Window):
if window.key_state(KEY_SPACE).pressed:
window.context['paused'] = not window.context['paused']
if window.key_state(KEY_Q).pressed:
window.close_window()
if window.mouse.hover and window.mouse.button_left.held:
x, y = window.context['big_pos']
x += window.mouse.dx / window.x_scale
y -= window.mouse.dy / window.y_scale
window.context['big_pos'] = (x, y)
if __name__ == "__main__":
spr = pxng.Sprite.create_from_image('sprites/pale_blue_original.png')
pale_blue = pxng.AnimatedSprite(spr, grid_size=(8, 8))
pale_blue.set_animation('idle_left', [(x, 0) for x in range(5)], fps=5)
pale_blue.set_animation('idle_right', [(x, 1) for x in range(5)], fps=5)
pale_blue.set_animation('walk_left', [(x, 2) for x in range(8)], fps=10)
pale_blue.set_animation('run_left', [(x, 2) for x in range(8)], fps=20)
pale_blue.set_animation('walk_right', [(x, 3) for x in range(8)], fps=10)
pale_blue.set_animation('run_right', [(x, 3) for x in range(8)], fps=20)
pale_blue.set_animation('shoot_left', [(x, 4) for x in range(5)], fps=10)
pale_blue.set_animation('shoot_right', [(x, 5) for x in range(5)], fps=10)
spr = pxng.Sprite.create_from_image('sprites/lighter_flame_01.png')
flame_sprite = pxng.AnimatedSprite(spr, grid_size=(8, 8))
spr = pxng.Sprite.create_from_image('sprites/fire_01.png')
fire_sprite = pxng.AnimatedSprite(spr, grid_size=(8, 8))
spr = pxng.Sprite.create_from_image('sprites/fire_02.png')
fire2_sprite = pxng.AnimatedSprite(spr, grid_size=(8, 8))
window = pxng.Window(640, 480, 'PixelEngine', scale=2)
window.context['frame'] = 0
window.context['paused'] = False
window.context['flame_sprite'] = flame_sprite
window.context['fire_sprite'] = fire_sprite
window.context['fire2_sprite'] = fire2_sprite
window.context['pale_blue'] = pale_blue
window.context['big_pos'] = (0, 0)
window.set_update_handler(update)
window.start_event_loop()
```
#### File: pixelengine/pxng/buffer_object.py
```python
import glm
from OpenGL.GL import (GL_ARRAY_BUFFER, glGenBuffers, glBindBuffer, glBufferData,
GL_DYNAMIC_DRAW, glEnableVertexAttribArray, GL_UNSIGNED_INT,
GL_UNSIGNED_SHORT, GL_UNSIGNED_BYTE, GL_FLOAT,
glVertexAttribPointer, GL_ELEMENT_ARRAY_BUFFER, GL_DOUBLE)
class BufferObject:
def __init__(self, data_type, array_type=GL_ARRAY_BUFFER, max_size=10000):
self._data_type = data_type
self._array_type = array_type
self._arr = glm.array(data_type()) * max_size
self._index = 0
self._vbo = glGenBuffers(1)
self._changed = True
@property
def type(self):
return self._data_type
@property
def dtype(self):
return self._arr.dtype
@property
def index(self):
return self._index
def __len__(self):
return self._index
@property
def changed(self):
return self._changed
@property
def component_count(self):
return len(self._data_type())
@property
def bytes_per_element(self):
return self._arr.dt_size * self.component_count
def set_value(self, value):
self._arr[self._index] = value
self._index += 1
self._changed = True
def reset(self):
self._index = 0
self._changed = True
def bind(self, attrib_index):
if self._index == 0:
return False
glBindBuffer(self._array_type, self._vbo)
if self._changed:
data = self._arr
size = data.itemsize * self._index
glBufferData(self._array_type, size, data.ptr, GL_DYNAMIC_DRAW)
self._changed = False
if self._array_type == GL_ARRAY_BUFFER:
glEnableVertexAttribArray(attrib_index)
count = self.component_count
stride = self.bytes_per_element
dtype = self.dtype
if dtype == 'float32':
gl_type = GL_FLOAT
elif dtype == 'float64':
gl_type = GL_DOUBLE
elif dtype == 'uint8':
gl_type = GL_UNSIGNED_BYTE
elif dtype == 'uint16':
gl_type = GL_UNSIGNED_SHORT
elif dtype == 'uint32':
gl_type = GL_UNSIGNED_INT
else:
raise UserWarning(f'Unknown data type: {dtype}')
glVertexAttribPointer(attrib_index, count, gl_type, False, stride, None)
return True
```
#### File: pixelengine/pxng/grid.py
```python
import glm
from OpenGL.GL import GL_LINES
import pxng
class Grid:
def __init__(self, width, height):
program = pxng.ShaderProgram('GridShader')
program.add_shader(pxng.resource('shaders/line.vert'), pxng.ShaderType.Vertex)
program.add_shader(pxng.resource('shaders/line.frag'), pxng.ShaderType.Fragment)
program.compile_and_link()
program.add_uniform('projection_view', glm.mat4x4)
program.add_uniform('model', glm.mat4x4)
program.add_uniform('color', glm.vec4)
program.add_uniform('resolution', glm.vec2)
program.add_uniform('dash_size', glm.vec1)
program.add_uniform('gap_size', glm.vec1)
self._shader_program = program
self._vao = pxng.VertexArrayObject(GL_LINES)
self._vao.attach_buffer(pxng.BufferObject(data_type=glm.vec3)) # vertex buffer
x = 1
y = 1
while x < width:
self._vao.add_line(glm.vec3(x, 0, 0), glm.vec3(x, height, 0))
x += 1
while y < height:
self._vao.add_line(glm.vec3(0, y, 0), glm.vec3(width, y, 0))
y += 1
def draw(self, spaces: pxng.Spaces, size, dash_size, gap_size):
if self._vao.bind():
spaces.model.push()
spaces.model.scale((size, size, 1))
program = self._shader_program
program.activate()
program.set_uniform('projection_view', spaces.projection_view)
program.set_uniform('model', spaces.model.m)
program.set_uniform('color', spaces.tint)
program.set_uniform('resolution', glm.vec2(spaces.width, spaces.height))
program.set_uniform('dash_size', dash_size)
program.set_uniform('gap_size', gap_size)
self._vao.draw()
spaces.model.pop()
```
#### File: pixelengine/pxng/sprite.py
```python
from typing import Tuple
import glm
import imageio
from numpy.core.multiarray import ndarray
import pxng
from pxng import resource
from OpenGL.GL import GL_TRIANGLES, GL_RGBA, GL_RGB, glGenTextures, \
GL_TEXTURE_RECTANGLE, glBindTexture, glTexParameteri, GL_TEXTURE_MAG_FILTER, \
GL_NEAREST, GL_TEXTURE_MIN_FILTER, glTexImage2D, GL_UNSIGNED_BYTE, glTexSubImage2D, \
GL_BLEND, glDisable, GL_RED, glPixelStorei, GL_UNPACK_ALIGNMENT, glGetInteger
class SpriteRectangle:
def __init__(self):
program = pxng.ShaderProgram('SpriteShader')
program.add_shader(resource('shaders/sprite.vert'), pxng.ShaderType.Vertex)
program.add_shader(resource('shaders/sprite.frag'), pxng.ShaderType.Fragment)
program.compile_and_link()
program.add_uniform('projection_view', glm.mat4x4)
program.add_uniform('model', glm.mat4x4)
program.add_uniform('texture_matrix', glm.mat4x4)
program.add_uniform('color', glm.vec4)
program.add_uniform('sprite_texture', glm.ivec1)
self._program = program
self._vao = pxng.VertexArrayObject(GL_TRIANGLES)
self._vao.attach_buffer(pxng.BufferObject(data_type=glm.vec3)) # vertex buffer
self._vao.attach_buffer(pxng.BufferObject(data_type=glm.uvec2)) # texture buffer
self._vao.add_quad(
glm.vec3(0, 0, 0),
glm.vec3(0, -1, 0),
glm.vec3(1, -1, 0),
glm.vec3(1, 0, 0),
)
self._vao.set_texture(
glm.uvec2(0, 1),
glm.uvec2(0, 0),
glm.uvec2(1, 0),
glm.uvec2(1, 1)
)
def draw(self, spaces: pxng.Spaces):
if self._vao.bind():
self._program.activate()
self._program.set_uniform('projection_view', spaces.projection_view)
self._program.set_uniform('model', spaces.model.m)
self._program.set_uniform('texture_matrix', spaces.texture.m)
self._program.set_uniform('color', spaces.tint)
self._program.set_uniform('sprite_texture', 0)
self._vao.draw()
class Sprite:
def __init__(self, data: ndarray):
self._data = data
self._created = False
self._dirty = False
self._texid = None
self._width = data.shape[1]
self._height = data.shape[0]
self._components = 1
if len(data.shape) > 2:
self._components = data.shape[2]
if self._components == 4:
self._format = GL_RGBA
elif self._components == 3:
self._format = GL_RGB
elif self._components == 1:
self._format = GL_RED
self._rect = None
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def set_pixel(self, x, y, color: Tuple[int, int, int]):
self._data[y, x] = color
self._dirty = True
def _set_unpack_alignment(self):
self._unpack_alignment = glGetInteger(GL_UNPACK_ALIGNMENT)
if self._width % 2 == 1 or self._width % 4 == 2:
# Odd sized texture
glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
def _reset_unpack_alignment(self):
glPixelStorei(GL_UNPACK_ALIGNMENT, self._unpack_alignment)
def _create(self):
self._texid = glGenTextures(1)
glBindTexture(GL_TEXTURE_RECTANGLE, self._texid)
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
d = self._data
w = self._width
h = self._height
fmt = self._format
self._set_unpack_alignment()
glTexImage2D(GL_TEXTURE_RECTANGLE, 0, fmt, w, h, 0, fmt, GL_UNSIGNED_BYTE, d)
self._reset_unpack_alignment()
self._rect = SpriteRectangle()
self._created = True
def _update(self):
glBindTexture(GL_TEXTURE_RECTANGLE, self._texid)
d = self._data
w = self._width
h = self._height
fmt = self._format
self._set_unpack_alignment()
glTexSubImage2D(GL_TEXTURE_RECTANGLE, 0, 0, 0, w, h, fmt, GL_UNSIGNED_BYTE, d)
self._reset_unpack_alignment()
        self._dirty = False
def update(self):
self._dirty = True
def activate(self):
if not self._created:
self._create()
if self._dirty:
self._update()
glBindTexture(GL_TEXTURE_RECTANGLE, self._texid)
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_RECTANGLE, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
def deactivate(self):
glBindTexture(GL_TEXTURE_RECTANGLE, 0)
def _pre_draw(self, spaces: pxng.Spaces):
self.activate()
spaces.model.push()
spaces.texture.push()
def _post_draw(self, spaces: pxng.Spaces):
self.deactivate()
spaces.model.pop()
spaces.texture.pop()
def draw(self, spaces: pxng.Spaces):
"""
Draw the sprite at the current position.
Parameters
----------
spaces : pxng.Spaces
the current coordinate systems
"""
self._pre_draw(spaces)
spaces.model.translate((0, self._height, 0)) # anchor is upper left
spaces.model.scale((self._width, self._height, 1))
spaces.texture.scale(glm.vec3(self._width, self._height, 1))
self._rect.draw(spaces)
self._post_draw(spaces)
def draw_partial(self, spaces: pxng.Spaces, x, y, width, height):
"""
Draw a partial sprite using pixel coordinates for the partial image data.
The coordinates represent a sub region in the sprite.
Parameters
----------
spaces: pxng.Spaces
the current coordinate systems
x: int
x coordinate in pixel space
y: int
y coordinate in pixel space
width: int
width in number of pixels
height: int
height in number of pixels
"""
self._pre_draw(spaces)
spaces.model.translate((0, height, 0)) # anchor is upper left
spaces.model.scale((width, height, 1))
spaces.texture.translate(glm.vec3(x, y, 0))
spaces.texture.scale(glm.vec3(width, height, 1))
self._rect.draw(spaces)
self._post_draw(spaces)
@classmethod
def create_from_image(cls, path):
img_data = imageio.imread(path)
return Sprite(img_data)
```
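A hedged sketch of building a sprite from raw pixel data, mirroring how fractal.py above constructs its framebuffer sprite; actually drawing it still requires an active `pxng.Window`, so only the data setup is shown.
```python
import numpy as np
import pxng

# 16x16 RGB checkerboard held in a numpy array.
data = np.zeros((16, 16, 3), dtype=np.uint8)
sprite = pxng.Sprite(data)
for y in range(16):
    for x in range(16):
        if (x + y) % 2 == 0:
            sprite.set_pixel(x, y, (255, 255, 255))

# Inside a window's update handler one would then call:
# window.draw_sprite(10, 10, sprite, scale=4)
```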
#### File: pixelengine/pxng/_utils.py
```python
def resource(resource):
from pathlib import Path
font_path = Path(__file__).parent / 'resources' / resource
return str(font_path)
```
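A hedged one-liner showing what `resource` resolves to; the shader path is one referenced by sprite.py above, and `resource` is assumed to be re-exported at package level as the other modules here import it.
```python
from pxng import resource

print(resource('shaders/sprite.vert'))  # absolute path under pxng/resources/
```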
|
{
"source": "jepegit/cellpy",
"score": 2
}
|
#### File: readers/instruments/pec.py
```python
import os
from dateutil.parser import parse
from datetime import datetime
import logging
import warnings
import numpy as np
import pandas as pd
from cellpy.readers.core import FileID, Cell, humanize_bytes
from cellpy.parameters.internal_settings import get_headers_normal
from cellpy.readers.instruments.mixin import Loader
pec_headers_normal = dict()
pec_headers_normal["step_index_txt"] = "Step"
pec_headers_normal["cycle_index_txt"] = "Cycle"
pec_headers_normal["test_time_txt"] = "Total_Time_Seconds" # This might change
pec_headers_normal["step_time_txt"] = "Step_Time_Seconds" # This might change
pec_headers_normal["datetime_txt"] = "Real_Time"
pec_headers_normal["voltage_txt"] = "Voltage_mV" # This might change
pec_headers_normal["current_txt"] = "Current_mA" # This might change
pec_headers_normal["charge_capacity_txt"] = "Charge_Capacity_mAh"
pec_headers_normal["discharge_capacity_txt"] = "Discharge_Capacity_mAh"
pec_headers_normal["charge_energy_txt"] = "Charge_Capacity_mWh"
pec_headers_normal["discharge_energy_txt"] = "Discharge_Capacity_mWh"
pec_headers_normal["internal_resistance_txt"] = "Internal_Resistance_1_mOhm"
pec_headers_normal["test_id_txt"] = "Test"
# TODO: better reading of first part of the file (comments and headers)
# 1. find the units
# 2. convert cycle and step numbers so that they start with 1 and not 0
# 3. find user-defined variables
class PECLoader(Loader):
"""Main loading class"""
def __init__(self):
self.headers_normal = (
get_headers_normal()
) # should consider to move this to the Loader class
self.current_chunk = 0 # use this to set chunks to load
self.pec_data = None
self.pec_log = None
self.pec_settings = None
self.variable_header_keywords = [
"Voltage (V)",
"Current (A)",
] # The unit of these will be read from file
self.fake_header_length = [
"#RESULTS CHECK\n",
"#END RESULTS CHECK\n",
] # Ignores number of delimiters in between
self.pec_file_delimiter = ","
self.filename = None
self.number_of_header_lines = None # Number of header lines is not constant
self.cellpy_headers = (
get_headers_normal()
) # should consider to move this to the Loader class
# @staticmethod
# def _get_pec_units():
# pec_units = dict()
# pec_units["voltage"] = 0.001 # V
# pec_units["current"] = 0.001 # A
# pec_units["charge"] = 0.001 # Ah
# pec_units["mass"] = 0.001 # g
# pec_units["energy"] = 0.001 # Wh
# return pec_units
def _get_pec_units(self): # Fetches units from a csv file
# Mapping prefixes to values
prefix = {"ยต": 10 ** -6, "m": 10 ** -3, "": 1}
# Adding the non-variable units to the return value
pec_units = {"charge": 0.001, "mass": 0.001, "energy": 0.001} # Ah # g # Wh
# A list with all the variable keywords without any prefixes, used as search terms
header = self.variable_header_keywords
data = pd.read_csv(self.filename, skiprows=self.number_of_header_lines, nrows=1)
# Searching for the prefix for all the variable units
for item in data.keys():
for unit in header:
x = unit.find("(") - len(unit)
if unit[: x + 1] in item:
y = item[x].replace("(", "")
# Adding units conversion factor to return value, renaming the headers to include correct units
if header.index(unit) == 0:
pec_units["voltage"] = prefix.get(y)
pec_headers_normal["voltage_txt"] = f"Voltage_{y}V"
elif header.index(unit) == 1:
pec_units["current"] = prefix.get(y)
pec_headers_normal["current_txt"] = f"Current_{y}A"
return pec_units
def _get_pec_times(self):
# Mapping units to their conversion values
logging.debug("retrieve pec units")
units = {
"(Hours in hh:mm:ss.xxx)": self.timestamp_to_seconds,
"(Decimal Hours)": 3600,
"(Minutes)": 60,
"(Seconds)": 1,
}
data = pd.read_csv(self.filename, skiprows=self.number_of_header_lines, nrows=0)
pec_times = dict()
# Adds the time variables and their units to the pec_times dictonary return value
# Also updates the column headers in pec_headers_normal with the correct name
for item in data.keys():
for unit in units:
if unit in item:
x = item.find("(")
var = item[: x - 1].lower().replace(" ", "_")
its_unit = item[x:]
pec_times[var] = units.get(its_unit)
if var == "total_time":
pec_headers_normal[
"test_time_txt"
] = f'Total_Time_{its_unit[1:-1].replace(" ", "_")}'
if var == "step_time":
pec_headers_normal[
"step_time_txt"
] = f'Step_Time_{its_unit[1:-1].replace(" ", "_")}'
return pec_times
@staticmethod
def get_raw_units():
"""Include the settings for the units used by the instrument.
The units are defined w.r.t. the SI units ('unit-fractions'; currently only units that are multiples of
Si units can be used). For example, for current defined in mA, the value for the
current unit-fraction will be 0.001.
Returns: dictionary containing the unit-fractions for current, charge, and mass
"""
raw_units = dict()
raw_units["voltage"] = 1.0 # V
raw_units["current"] = 1.0 # A
raw_units["charge"] = 1.0 # Ah
raw_units["mass"] = 0.001 # g
raw_units["energy"] = 1.0 # Wh
raw_units["total_time"] = 1.0 # s
raw_units["step_time"] = 1.0 # s
return raw_units
def get_raw_limits(self):
"""Include the settings for how to decide what kind of step you are examining here.
The raw limits are 'epsilons' used to check if the current and/or voltage is stable (for example
for galvanostatic steps, one would expect that the current is stable (constant) and non-zero).
It is expected that different instruments (with different resolution etc.) have different
'epsilons'.
Returns: the raw limits (dict)
"""
warnings.warn("raw limits have not been subject for testing yet")
raw_limits = dict()
raw_limits["current_hard"] = 0.1 # There is a bug in PEC
raw_limits["current_soft"] = 1.0
raw_limits["stable_current_hard"] = 2.0
raw_limits["stable_current_soft"] = 4.0
raw_limits["stable_voltage_hard"] = 2.0
raw_limits["stable_voltage_soft"] = 4.0
raw_limits["stable_charge_hard"] = 2.0
raw_limits["stable_charge_soft"] = 5.0
raw_limits["ir_change"] = 0.00001
return raw_limits
def loader(self, file_name, bad_steps=None, **kwargs):
new_tests = []
if not os.path.isfile(file_name):
self.logger.info("Missing file_\n %s" % file_name)
return None
self.filename = file_name
self.number_of_header_lines = self._find_header_length()
filesize = os.path.getsize(file_name)
hfilesize = humanize_bytes(filesize)
txt = "Filesize: %i (%s)" % (filesize, hfilesize)
logging.debug(txt)
data = Cell()
fid = FileID(file_name)
# div parameters and information (probably load this last)
test_no = 1
data.cell_no = test_no
data.loaded_from = file_name
# some overall prms
data.channel_index = None
data.channel_number = None
data.creator = None
data.item_ID = None
data.schedule_file_name = None
data.test_ID = None
data.test_name = None
data.raw_data_files.append(fid)
# --------- read raw-data (normal-data) -------------------------
self._load_pec_data(file_name, bad_steps)
data.start_datetime = self.pec_settings["start_time"]
length_of_test = self.pec_data.shape[0]
logging.debug(f"length of test: {length_of_test}")
logging.debug("renaming columns")
self._rename_headers()
self._convert_units()
# cycle indices should not be 0
if 0 in self.pec_data["cycle_index"]:
self.pec_data["cycle_index"] += 1
data.raw = self.pec_data
data.raw_data_files_length.append(length_of_test)
new_tests.append(data)
return new_tests
def _load_pec_data(self, file_name, bad_steps):
number_of_header_lines = self.number_of_header_lines
# ----------------- reading the data ---------------------
df = pd.read_csv(file_name, skiprows=number_of_header_lines)
# get rid of unnamed columns
df = df.loc[:, ~df.columns.str.contains("^Unnamed")]
# get rid of spaces, parenthesis, and the deg-sign
new_column_headers = {
c: c.replace(" ", "_")
.replace("(", "")
.replace(")", "")
.replace("ยฐ", "")
.replace(r"%", "pct")
for c in df.columns
}
df.rename(columns=new_column_headers, inplace=True)
# add missing columns
df.insert(0, self.headers_normal.data_point_txt, range(len(df)))
df[self.headers_normal.sub_step_index_txt] = 0
df[self.headers_normal.sub_step_time_txt] = 0
self.pec_data = df
# ---------------- reading the parameters ---------------
with open(file_name, "r") as ofile:
counter = 0
lines = []
for line in ofile:
counter += 1
if counter > number_of_header_lines:
break
lines.append(line)
self._extract_variables(lines)
def _extract_variables(self, lines):
header_comments = dict()
comment_loop = False
for line_number, line in enumerate(lines):
if line.startswith("#"):
if not comment_loop:
comment_loop = True
else:
comment_loop = False
else:
if not comment_loop:
parts = line.split(",")
variable = parts[0].strip()
variable = variable.strip(":")
variable = variable.replace(" ", "_")
try:
value = parts[1].strip()
except IndexError:
value = None
if not value:
value = np.nan
header_comments[variable] = value
logging.debug(" Headers Dict ")
logging.debug(header_comments)
headers = dict()
start_time = parse(header_comments["Start_Time"])
end_time = parse(header_comments["End_Time"])
headers["start_time"] = start_time
headers["end_time"] = end_time
# headers["test_regime_name"] = header_comments["TestRegime_Name"]
self.pec_settings = headers
def _rename_headers(self):
logging.debug("Trying to rename the columns")
# logging.debug("Current columns:")
# logging.debug(self.pec_data.columns)
# logging.debug("Rename to:")
# logging.debug(self.headers_normal)
for key in pec_headers_normal:
self._rename_header(key, pec_headers_normal[key])
# logging.debug("New cols:")
# logging.debug(self.pec_data.columns)
def _convert_units(self):
logging.debug("Trying to convert all data into correct units")
logging.debug("- dtypes")
self.pec_data[self.headers_normal.datetime_txt] = pd.to_datetime(
self.pec_data[self.headers_normal.datetime_txt]
)
self.pec_data["Position_Start_Time"] = pd.to_datetime(
self.pec_data["Position_Start_Time"]
)
self.pec_data["Rack"] = self.pec_data["Rack"].astype("category")
logging.debug("- cellpy units")
pec_units = self._get_pec_units()
pec_times = self._get_pec_times()
raw_units = self.get_raw_units()
self._rename_headers() # Had to run this again after fixing the headers, might be a better way to fix this
_v = pec_units["voltage"] / raw_units["voltage"]
_i = pec_units["current"] / raw_units["current"]
_c = pec_units["charge"] / raw_units["charge"]
_w = pec_units["energy"] / raw_units["energy"]
# Check if time is given in units proportional to seconds or in a hh:mm:ss.xxx format
# Convert all hh:mm:ss.xxx formats to seconds using self.timestamp_to_seconds()
relevant_times = ["total_time", "step_time"]
for x in relevant_times:
if isinstance(pec_times[x], (int, float)):
if x == relevant_times[0]:
_tt = pec_times["total_time"] / raw_units["total_time"]
self.pec_data[self.headers_normal.test_time_txt] *= _tt
elif x == relevant_times[1]:
_st = pec_times["step_time"] / raw_units["step_time"]
self.pec_data[self.headers_normal.step_time_txt] *= _st
elif callable(pec_times[x]):
# EDIT jepe 18.06.2020: change to .apply(func) instead of for-loop
# (now the column is of float64 type and behaves properly)
if x == relevant_times[0]:
# col = self.pec_data[self.headers_normal.test_time_txt]
hdr = self.headers_normal.test_time_txt
elif x == relevant_times[1]:
# col = self.pec_data[self.headers_normal.step_time_txt]
hdr = self.headers_normal.step_time_txt
self.pec_data[hdr] = self.pec_data[hdr].apply(pec_times[x])
# for i in range(len(col)):
# col[i] = pec_times[x](col[i])
v_txt = self.headers_normal.voltage_txt
i_txt = self.headers_normal.current_txt
self.pec_data[v_txt] *= _v
self.pec_data[i_txt] *= _i
self.pec_data[self.headers_normal.charge_capacity_txt] *= _c
self.pec_data[self.headers_normal.discharge_capacity_txt] *= _c
self.pec_data[self.headers_normal.charge_energy_txt] *= _w
self.pec_data[self.headers_normal.discharge_energy_txt] *= _w
def _rename_header(self, h_old, h_new):
try:
self.pec_data.rename(
columns={h_new: self.cellpy_headers[h_old]}, inplace=True
)
except KeyError as e:
logging.info(f"Problem during conversion to cellpy-format ({e})")
def _find_header_length(self):
skiprows = 0
resultscheck = False # Ignore number of delimiters inside RESULTS CHECK
with open(self.filename, "r") as header:
for line in header:
if line in self.fake_header_length:
resultscheck = not resultscheck
if (
line.count(self.pec_file_delimiter) > 1 and not resultscheck
): # End when there are >2 columns
break
skiprows += 1
return skiprows
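# Note on the approach (assumed PEC file layout): the csv export starts with a
# variable-length "key,value" header, optionally containing a RESULTS CHECK
# block. The first line outside such a block that holds more than one delimiter
# (i.e. more than two columns) is taken as the start of the data table, and the
# returned count is used as pandas' skiprows when reading the file.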
@staticmethod
def timestamp_to_seconds(timestamp): # Converts the hh:mm:ss.xxx time format to seconds
total_secs = 0
# strptime can not handle more than 24 hours, days are counted manually
hours = int(timestamp[:2])
if hours >= 24:
days = hours // 24
total_secs += days * 3600 * 24
timestamp = str(hours - 24 * days) + timestamp[2:]
total_secs += (
datetime.strptime(timestamp, "%H:%M:%S.%f")
- datetime.strptime("00:00:00.000", "%H:%M:%S.%f")
).total_seconds()
return total_secs
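# Example (illustrative): timestamps beyond 24 hours are handled by counting
# whole days first, e.g.
#   timestamp_to_seconds("26:30:15.500")  ->  86400 + 9015.5 = 95415.5 seconds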
if __name__ == "__main__":
pass
```
#### File: utils/batch_tools/batch_helpers.py
```python
import logging
import os
import warnings
import pandas as pd
import csv
import itertools
from cellpy import filefinder, prms
from cellpy.exceptions import ExportFailed, NullData, WrongFileVersion
import cellpy.parameters.internal_settings
# logger = logging.getLogger(__name__)
hdr_summary = cellpy.parameters.internal_settings.get_headers_summary()
hdr_journal = cellpy.parameters.internal_settings.get_headers_journal()
def look_up_and_get(cellpy_file_name, table_name, root=None):
"""Extracts table from cellpy hdf5-file."""
# infoname = '/CellpyData/info'
# dataname = '/CellpyData/dfdata'
# summaryname = '/CellpyData/dfsummary'
# fidname = '/CellpyData/fidtable'
# stepname = '/CellpyData/step_table'
if root is None:
root = "/CellpyData"
table_path = "/".join([root, table_name])
logging.debug(f"look_up_and_get({cellpy_file_name}, {table_name}")
store = pd.HDFStore(cellpy_file_name)
try:
table = store.select(table_path)
store.close()
except KeyError as e:
logging.warning("Could not read the table")
store.close()
raise WrongFileVersion(e)
return table
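# Usage sketch (the file name is a placeholder; table names follow the commented
# examples above, e.g. "dfsummary" or "step_table"):
#   summary = look_up_and_get("my_cell.h5", "dfsummary")
# which reads the table stored under "/CellpyData/dfsummary".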
def create_folder_structure(project_name, batch_name):
"""This function creates a folder structure for the batch project.
The folder structure consists of the main working folder ``project_name``
located in the ``outdatadir`` (as defined in the cellpy configuration file)
with a sub-folder named ``batch_name``. It also creates a folder
inside the ``batch_name`` folder for storing the raw data.
If the folders do not exist, they will be created. The function also returns
the name of the info-df (a json file).
Args:
project_name: name of the project
batch_name: name of the batch
Returns: (info_file, (project_dir, batch_dir, raw_dir))
"""
out_data_dir = prms.Paths["outdatadir"]
project_dir = os.path.join(out_data_dir, project_name)
batch_dir = os.path.join(project_dir, batch_name)
raw_dir = os.path.join(batch_dir, "raw_data")
# create folders
if not os.path.isdir(project_dir):
os.mkdir(project_dir)
if not os.path.isdir(batch_dir):
os.mkdir(batch_dir)
if not os.path.isdir(raw_dir):
os.mkdir(raw_dir)
# create file-name for the info_df (json)
info_file = "cellpy_batch_%s.json" % batch_name
info_file = os.path.join(project_dir, info_file)
return info_file, (project_dir, batch_dir, raw_dir)
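# Usage sketch (project and batch names are placeholders):
#   info_file, (project_dir, batch_dir, raw_dir) = create_folder_structure(
#       "MyProject", "experiment_001"
#   )
# creates <outdatadir>/MyProject/experiment_001/raw_data if needed and returns
# the path to "cellpy_batch_experiment_001.json" in the project folder.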
def find_files(info_dict, file_list=None, pre_path=None, **kwargs):
"""Find files using cellpy.filefinder.
Args:
info_dict: journal pages.
file_list: list of files names to search through.
pre_path: path to prepend found files from file_list (if file_list is given).
**kwargs: sent to filefinder.search_for_files.
Returns:
info_dict
"""
# searches for the raw data files and the cellpyfile-name
# TODO: implement faster file searching
# TODO: implement option for not searching for raw-file names if force_cellpy is True
for run_name in info_dict[hdr_journal["filename"]]:
logging.debug(f"checking for {run_name}")
raw_files, cellpyfile = filefinder.search_for_files(
run_name, file_list=file_list, pre_path=pre_path, **kwargs
)
if not raw_files:
raw_files = None
info_dict[hdr_journal["raw_file_names"]].append(raw_files)
info_dict[hdr_journal["cellpy_file_name"]].append(cellpyfile)
return info_dict
def fix_groups(groups):
"""Takes care of strange group numbers."""
_groups = []
for g in groups:
try:
if not float(g) > 0:
_groups.append(1000)
else:
_groups.append(int(g))
except TypeError as e:
logging.info("Error in reading group number (check your db)")
logging.debug(g)
logging.debug(e)
_groups.append(1000)
return _groups
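# Example (illustrative): non-numeric or non-positive group entries fall back
# to the dummy group 1000:
#   fix_groups([1, 2, None, -1])  ->  [1, 2, 1000, 1000]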
def save_multi(data, file_name, sep=";"):
"""Convenience function for storing data column-wise in a csv-file."""
logging.debug("saving multi")
with open(file_name, "w", newline="") as f:
logging.debug(f"{file_name} opened")
writer = csv.writer(f, delimiter=sep)
try:
writer.writerows(itertools.zip_longest(*data))
logging.info(f"{file_name} OK")
except Exception as e:
logging.info(f"Exception encountered in batch._save_multi: {e}")
raise ExportFailed
logging.debug("wrote rows using itertools in _save_multi")
def make_unique_groups(info_df):
"""This function cleans up the group numbers a bit."""
# fixes group numbering
unique_g = info_df[hdr_journal.group].unique()
unique_g = sorted(unique_g)
new_unique_g = list(range(len(unique_g)))
info_df[hdr_journal.sub_group] = info_df[hdr_journal.group] * 0
for i, j in zip(unique_g, new_unique_g):
counter = 1
for indx, row in info_df.loc[info_df[hdr_journal.group] == i].iterrows():
info_df.at[indx, hdr_journal.sub_group] = counter
counter += 1
info_df.loc[info_df[hdr_journal.group] == i, hdr_journal.group] = j + 1
return info_df
def _remove_date_and_celltype(label):
parts = label.split("_")
parts.pop(0)
if parts[-1] in ["cc", "ec", "eth"]:
parts.pop(-1)
return "_".join(parts)
def create_labels(label, *args):
"""Returns a re-formatted label (currently it only removes the dates
from the run-name)"""
return _remove_date_and_celltype(label)
def create_selected_summaries_dict(summaries_list):
"""Creates a dictionary with summary column headers.
Examples:
>>> summaries_to_output = ["discharge_capacity", "charge_capacity"]
>>> summaries_to_output_dict = create_selected_summaries_dict(
>>> summaries_to_output
>>> )
>>> print(summaries_to_output_dict)
{'discharge_capacity': "Discharge_Capacity(mAh/g)",
'charge_capacity': "Charge_Capacity(mAh/g)"}
Args:
summaries_list: list containing cellpy summary column id names
Returns: dictionary of the form {cellpy id name: cellpy summary
header name,}
"""
selected_summaries = dict()
for h in summaries_list:
selected_summaries[h] = hdr_summary[h]
return selected_summaries
def pick_summary_data(key, summary_df, selected_summaries):
"""picks the selected pandas.DataFrame"""
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
value = selected_summaries_dict[key]
return summary_df.iloc[:, summary_df.columns.get_level_values(1) == value]
def join_summaries(summary_frames, selected_summaries, keep_old_header=False):
"""parse the summaries and combine based on column (selected_summaries)"""
if not summary_frames:
raise NullData("No summaries available to join")
selected_summaries_dict = create_selected_summaries_dict(selected_summaries)
frames = []
keys = [] # test-name
for key in summary_frames:
keys.append(key)
if summary_frames[key].empty:
logging.debug("Empty summary_frame encountered")
frames.append(summary_frames[key])
out = []
summary_df = pd.concat(frames, keys=keys, axis=1)
for key, value in selected_summaries_dict.items():
_summary_df = summary_df.iloc[
:, summary_df.columns.get_level_values(1) == value
]
_summary_df.name = key
if not keep_old_header:
try:
_summary_df.columns = _summary_df.columns.droplevel(-1)
except AttributeError as e:
logging.debug("could not drop level from frame")
logging.debug(e)
out.append(_summary_df)
logging.debug("finished joining summaries")
return out
def generate_folder_names(name, project):
"""Creates sensible folder names."""
out_data_dir = prms.Paths.outdatadir
project_dir = os.path.join(out_data_dir, project)
batch_dir = os.path.join(project_dir, name)
raw_dir = os.path.join(batch_dir, "raw_data")
return out_data_dir, project_dir, batch_dir, raw_dir
def _extract_dqdv(cell_data, extract_func, last_cycle):
"""Simple wrapper around the cellpy.utils.ica.dqdv function."""
from cellpy.utils.ica import dqdv
list_of_cycles = cell_data.get_cycle_numbers()
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
logging.debug(f"only processing up to cycle {last_cycle}")
logging.debug(f"you have {len(list_of_cycles)} cycles to process")
out_data = []
for cycle in list_of_cycles:
try:
c, v = extract_func(cycle)
v, dq = dqdv(v, c)
v = v.tolist()
dq = dq.tolist()
except NullData as e:
v = list()
dq = list()
logging.info(" Ups! Could not process this (cycle %i)" % cycle)
logging.info(" %s" % e)
header_x = "dQ cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
dq.insert(0, header_x)
v.insert(0, header_y)
out_data.append(v)
out_data.append(dq)
return out_data
def export_dqdv(cell_data, savedir, sep, last_cycle=None):
"""Exports dQ/dV data from a CellpyData instance.
Args:
cell_data: CellpyData instance
savedir: path to the folder where the files should be saved
sep: separator for the .csv-files.
last_cycle: only export up to this cycle (if not None)
"""
logging.debug("exporting dqdv")
filename = cell_data.cell.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if savedir:
firstname = os.path.join(savedir, os.path.basename(firstname))
logging.debug(f"savedir is true: {firstname}")
outname_charge = firstname + "_dqdv_charge.csv"
outname_discharge = firstname + "_dqdv_discharge.csv"
list_of_cycles = cell_data.get_cycle_numbers()
number_of_cycles = len(list_of_cycles)
logging.debug("%s: you have %i cycles" % (filename, number_of_cycles))
# extracting charge
out_data = _extract_dqdv(cell_data, cell_data.get_ccap, last_cycle)
logging.debug("extracted ica for charge")
try:
save_multi(data=out_data, file_name=outname_charge, sep=sep)
except ExportFailed as e:
logging.info("could not export ica for charge")
warnings.warn(f"ExportFailed exception raised: {e}")
else:
logging.debug("saved ica for charge")
# extracting discharge
out_data = _extract_dqdv(cell_data, cell_data.get_dcap, last_cycle)
logging.debug("extracted ica for discharge")
try:
save_multi(data=out_data, file_name=outname_discharge, sep=sep)
except ExportFailed as e:
logging.info("could not export ica for discharge")
warnings.warn(f"ExportFailed exception raised: {e}")
else:
logging.debug("saved ica for discharge")
```
#### File: cellpy/utils/easyplot.py
```python
import logging
import os
import warnings
from pathlib import Path
from re import S
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import lines
from matplotlib.artist import kwdoc
from matplotlib.lines import Line2D
from matplotlib.scale import LogScale
from matplotlib.ticker import FuncFormatter
import cellpy
from cellpy import log
from cellpy.utils.batch_tools.batch_journals import LabJournal
from cellpy.parameters.internal_settings import (
get_headers_journal,
keys_journal_session,
)
hdr_journal = get_headers_journal()
# Dictionary of all possible user input arguments(as keys) with example values of correct type
# Value is a tuple (immutable) of type and default value.
USER_PARAMS = {
"cyclelife_plot": (bool, True),
"cyclelife_separate_data": (
bool,
False,
), # will plot each cyclelife datafile in separate plots
"cyclelife_percentage": (bool, False),
"cyclelife_coulombic_efficiency": (bool, False),
"cyclelife_coulombic_efficiency_ylabel": (str, "Coulombic efficiency [%]"),
"cyclelife_charge_c_rate": (bool, False),
"cyclelife_discharge_c_rate": (bool, False),
"cyclelife_c_rate_ylabel": (str, "Effective C-rate"),
"cyclelife_ir": (bool, False), # Allows user to plot IR data aswell
"cyclelife_xlabel": (str, "Cycles"),
"cyclelife_ylabel": (str, r"Capacity $\left[\frac{mAh}{g}\right]$"),
"cyclelife_ylabel_percent": (str, "Capacity retention [%]"),
"cyclelife_legend_outside": (
bool,
False,
), # if True, the legend is placed outside the plot
"cyclelife_degradation_slope": (
bool,
False,
), # Adds simple degradation slope regression to plot
"capacity_determination_from_ratecap": (
bool,
False,
), # If True, uses the ratecap and capacity to determine the exp capacity
"galvanostatic_plot": (bool, True),
"galvanostatic_potlim": (tuple, None), # min and max limit on potential-axis
"galvanostatic_caplim": (tuple, None),
"galvanostatic_xlabel": (str, r"Capacity $\left[\frac{mAh}{g}\right]$"),
"galvanostatic_ylabel": (str, "Cell potential [V]"),
"galvanostatic_normalize_capacity": (
bool,
False,
), # Normalizes all cycles' capacity to 1.
"dqdv_plot": (bool, False),
"dqdv_potlim": (tuple, None), # min and max limit on potential-axis
"dqdv_dqlim": (tuple, None),
"dqdv_xlabel": (
str,
r"dQ/dV $\left[\frac{mAh}{gV}\right]$",
), # TODO what unit? jees
"dqdv_ylabel": (str, "Cell potential [V]"),
"specific_cycles": (list, None),
"exclude_cycles": (list, None),
"all_in_one": (
bool,
False,
), # Decides if everything should be plotted in the same plot in GC and dQdV plot
"only_dischg": (bool, False), # Only show discharge curves
"only_chg": (bool, False), # Only show charge curves
"outpath": (str, "./"),
"outtype": (str, ".png"), # What file format to save in
"outname": (str, None), # Overrides the automatic filename generation
"figsize": (tuple, (6, 4)), # 6 inches wide, 4 inches tall
"figres": (int, 100), # Dots per Inch
"figtitle": (str, "Title"), # None = original filepath
"save_figures": (bool, True),
"save_journal": (bool, False), # Save journal
}
def help():
"""Method of the EasyPlot class which prints some helptext in addition to all supported params."""
## Prints out help page of this module
help_str = (
"The easyplot extension to cellpy aims to easily plot data in a pretty manner.\n"
"In order to use this function, you must import cellpy, and easyplot from cellpy.utils.\n"
"\n"
"Usage:\n"
"Create list of datafiles you want to plot on the following format:\n"
"\n"
"files = [\n"
"\t'./folder/filename.ext',\n"
"\t'./folder/filename2.ext',\n"
"\t]\n"
"\n"
"And then call the easyplot.plot function with the files list as the first parameter, and any optional keyword arguments.\n"
"Here is an example of the use of all keyword arguments:\n"
)
for kw in USER_PARAMS:
if type(USER_PARAMS[kw][1]) == str:
insert = "'" + USER_PARAMS[kw][1] + "'"
else:
insert = str(USER_PARAMS[kw][1])
help_str += "\t" + kw + " = " + insert + ",\n"
print(help_str)
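# Minimal usage sketch (file names and kwargs are placeholders; see help() above
# for the full list of supported keyword arguments):
#   from cellpy.utils import easyplot
#   files = ["./data/cell_01.res", "./data/cell_02.res"]
#   ezplt = easyplot.EasyPlot(files, None, cyclelife_plot=True, galvanostatic_plot=True)
#   ezplt.plot()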
class EasyPlot:
"""Main easyplot class.
Takes all the inputs from the user in its kwargs upon object initialization.
Gathers data, handles and plots it when object.plot() is called.
Help: type easyplot.help()
"""
def __init__(self, files=None, nicknames=None, journal=None, **kwargs):
"""Initialization function of the EasyPlot class.
Input parameters:
files (list of strings).
nicknames (list of strings), must match length of filenames.
journal (str or pathlib.Path object): journal file name (should not be used if files is given).
any kwargs: use easyplot.help() to print all kwargs to terminal.
Returns:
easyplot object
Most basic usage:
ezpltobj = easyplot.EasyPlot(["name1", "name2"], None)"""
# Make all user input variables of self
self.files = files
self.nicknames = nicknames
self.kwargs = kwargs
# More needed variables
self.figs = []
self.file_data = []
self.use_arbin_sql = False
if journal is not None:
self.journal_file = Path(journal)
else:
self.journal_file = None
self.journal = None
# Dictionary of all possible user input arguments(as keys) with example values of correct type
# Value is a tuple (immutable) of type and default value.
self.user_params = USER_PARAMS
# Create 'empty' attributes for later use
self.outpath = None
self.masses = None
self.labels = None
self.nom_caps = None
self.colors = None
# List of available colors
# Fill in the rest of the variables from self.user_params if the user didn't specify
self.fill_input()
# Verify that the user input is sufficient
self.verify_input()
self._generate_list_of_available_colors()
def _generate_list_of_available_colors(self):
if 19 >= len(self.files) > 10:
self.colors = [
"#e6194b",
"#3cb44b",
"#ffe119",
"#4363d8",
"#f58231",
"#911eb4",
"#46f0f0",
"#f032e6",
"#bcf60c",
"#fabebe",
"#008080",
"#e6beff",
"#9a6324",
"#fffac8",
"#800000",
"#aaffc3",
"#808000",
"#ffd8b1",
"#000075",
"#808080",
"#000000",
]
warnings.warn(
"You inserted more than 10 datafiles! In a desperate attempt to keep "
"the plots tidy, another colorpalette with 19 distinct colors were chosen."
)
elif len(self.files) > 19:
warnings.warn(
"You inserted more than 19 datafiles! We do not have that "
"many colors in the palette, this some colors are beeing recycled. "
"Keep track of the filenames and legends and make sure this doesn't confuse you."
)
else:
self.colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
] * 5
def plot(self):
"""This is the method the user calls on his/hers easyplot object in order to gather the data and plot it.
Usage: object.plot()"""
# Load all cellpy files
logging.debug("starting plotting")
for file in self.files:
if isinstance(file, (list, tuple)):
logging.debug("linked files provided - need to merge")
linked_files = True
else:
linked_files = False
# If using arbin sql
if self.use_arbin_sql:
cpobj = cellpy.get(
filename=file, instrument="arbin_sql"
) # Initiate cellpy object
else: # Not Arbin SQL? Then it's probably a local file
# Check that file(s) exist
if linked_files:
file_name = "_".join(file)
for _f in file:
if not os.path.isfile(_f):
logging.error("File not found: " + str(_f))
raise FileNotFoundError
else:
file_name = file
if not os.path.isfile(file):
logging.error("File not found: " + str(file))
print(os.getcwd())
raise FileNotFoundError
cpobj = cellpy.get(filename=file) # Load regular file
# Check that we get data
if cpobj is None:
warnings.warn(
f"File reader returned no data for filename {file}. Please make sure that the file exists or "
f"that the data exists in an eventual database."
)
# Get ID of all cycles
cyc_nums = cpobj.get_cycle_numbers()
# Only get the cycles which both exist in data, and that the user want
if self.kwargs["specific_cycles"] is not None:
cyc_not_available = (
set(cyc_nums) ^ set(self.kwargs["specific_cycles"])
) & set(self.kwargs["specific_cycles"])
if len(cyc_not_available) > 0:
warn_str = (
f"You want to plot cycles which are not available in the data! Datafile(s): "
f"{file}"
f", Cycle(s): {str(cyc_not_available)}"
)
warnings.warn(warn_str)
cyc_nums = list(
set(cyc_nums).intersection(self.kwargs["specific_cycles"])
)
if self.kwargs["exclude_cycles"] is not None:
cyc_nums = list(set(cyc_nums) - set(self.kwargs["exclude_cycles"]))
color = self.give_color() # Get a color for the data
self.file_data.append((cpobj, cyc_nums, color, file_name))
# Check kwargs/input parameters to see what plots to make
if self.kwargs["cyclelife_plot"]:
self.plot_cyclelife()
if self.kwargs["galvanostatic_plot"] and not self.kwargs["dqdv_plot"]:
self.plot_gc()
if self.kwargs["dqdv_plot"] and not self.kwargs["galvanostatic_plot"]:
self.plot_dQdV()
if self.kwargs["galvanostatic_plot"] and self.kwargs["dqdv_plot"]:
self.plot_gc_and_dQdV()
if self.kwargs["capacity_determination_from_ratecap"]:
self.plot_cap_from_rc()
self._wrap_up()
def _wrap_up(self):
# saving journal file
if self.kwargs["save_journal"]:
if self.journal is not None:
if self.outpath is not None:
journal_file_path = Path(self.outpath) / self.journal_file.name
else:
journal_file_path = self.journal_file.name
# if we want to enforce that the file will be a xlsx file:
# journal_file_path = journal_file_path.with_suffix(".xlsx")
journal_file_path = journal_file_path.with_suffix(".json")
self.journal.to_file(
file_name=journal_file_path, paginate=False, to_project_folder=False
)
xlsx_journal_file_path = journal_file_path.with_name(
f"{journal_file_path.stem}.xlsx"
)
self.journal.to_file(
file_name=xlsx_journal_file_path,
paginate=False,
to_project_folder=False,
)
def verify_input(self):
"""Verifies that the users' input to the object is correct."""
# Check that output dir exist (or create one)
self.outpath = self.handle_outpath() # Takes care of the output path
# Check the nicknames
if self.nicknames:
if len(self.nicknames) != len(self.files):
logging.error(
"Use nicknames = None, or specify exactly one nickname per datafile. You have specified "
+ str(len(self.nicknames))
+ " nicknames while inputting "
+ str(len(self.files))
+ " datafiles"
)
raise AssertionError
# Check that all kwargs are used correctly
for key in self.kwargs:
# Check that input parameter exist
try:
self.user_params[key]
except KeyError as e:
logging.error(
"Input parameter "
+ key
+ " is not a valid parameter! Please see example configuration for help or run easyplot.help()"
)
# Check that the type is correct
if type(self.kwargs[key]) != self.user_params[key][0] and type(
self.kwargs[key]
) != type(None):
logging.error(
"Type of inputparameter for keyword '"
+ key
+ "' is wrong. The user specified "
+ str(type(self.kwargs[key]))
+ " but the program needs a "
+ str(self.user_params[key][0])
)
raise TypeError
# Check that the user isn't trying to plot "only" both discharge and charge.
if self.kwargs["only_dischg"] and self.kwargs["only_chg"]:
logging.error(
"You can't plot 'only' discharge AND charge curves! Set one to False please."
)
if self.journal_file is not None:
# Check that the user isn't providing both a list of files and a journal filename
if self.files is not None:
logging.error(
"You can't give both filenames and a journal file at the same time."
)
logging.error("Chose either filenames OR journal file name please.")
raise ValueError
self._read_journal_file()
self._populate_from_journal() # Temporary fix - the parameters should be read directly from journal later
else:
if self.files is None:
logging.error("No file names provided.")
logging.error("Add file names OR journal file name please.")
raise ValueError
def _read_journal_file(self):
logging.debug(f"reading journal file {self.journal_file}")
journal = LabJournal(db_reader=None)
journal.from_file(self.journal_file, paginate=False)
self.journal = journal
def _populate_from_journal(self):
logging.debug(f"populating from journal")
# populating from only a subset of the available journal columns
# - can be increased later
try:
self.files = self.journal.pages[hdr_journal["raw_file_names"]].to_list()
except AttributeError:
logging.debug("No raw files found in your journal")
try:
self.masses = self.journal.pages[hdr_journal["mass"]].to_list()
except AttributeError:
logging.debug("No masses found in your journal")
try:
self.labels = self.journal.pages[hdr_journal["label"]].to_list()
except AttributeError:
logging.debug("No labels found in your journal")
try:
self.nom_caps = self.journal.pages[hdr_journal["nom_cap"]].to_list()
except AttributeError:
logging.debug("No nominal capacity found in your journal")
try:
self.cellpy_files = self.journal.pages[
hdr_journal["cellpy_file_name"]
].to_list()
except AttributeError:
logging.debug("No cellpy files found in your journal")
def fill_input(self):
"""Fill in the rest of the variables from self.user_params if the user didn't specify"""
# Can't just join dicts since they have differing formats, need to loop...
for key in self.user_params:
try:
self.kwargs[key]
except KeyError:
self.kwargs[key] = self.user_params[key][1]
def set_arbin_sql_credentials(
self,
server="localhost",
uid="sa",
pwd="<PASSWORD>",
driver="ODBC Driver 17 for SQL Server",
):
"""Sets cellpy.prms.Instruments.Arbin details to fit what is inserted.
Parameters: Server = 'IP of server', uid = 'username', pwd = 'password', driver = 'ODBC Driver 17 for SQL Server' """
cellpy.prms.Instruments.Arbin["SQL_server"] = server
cellpy.prms.Instruments.Arbin["SQL_UID"] = uid
cellpy.prms.Instruments.Arbin["SQL_PWD"] = pwd
cellpy.prms.Instruments.Arbin["SQL_Driver"] = driver
self.use_arbin_sql = True
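# Usage sketch (credentials are placeholders): call this before plot() when the
# data should be fetched from an Arbin SQL server instead of local files:
#   ezplt.set_arbin_sql_credentials(server="10.0.0.1", uid="sa", pwd="...", driver="ODBC Driver 17 for SQL Server")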
def give_color(self):
"""Picks the first color from the color list and gives it away"""
color = self.colors[0]
self.colors = self.colors[1:]
return color
def give_fig(self):
"""Gives figure to whoever asks and appends it to figure list"""
fig, ax = plt.subplots(figsize=(6, 4))
self.figs.append((fig, ax))
return fig, ax
def handle_outpath(self):
"""Makes sure that self.outpath exists, or creates it."""
out_path = self.kwargs["outpath"]
# should make this a pathlib.Path object - but not sure if str is assumed later on in the code
if os.path.isdir(out_path):
logging.debug(f"out path set to {out_path}")
return out_path
elif not os.path.isdir(out_path):
logging.debug(f"outpath does not exits - creating")
try:
os.makedirs(out_path)
logging.debug(f"out path set to {out_path}")
return out_path
except OSError as e:
logging.error(
f"Cannot create output directory {out_path}. Please make sure you "
f"have write permission. Error message: {e}"
)
def plot_cyclelife(self):
"""Takes all the parameters inserted in the object creation and plots cyclelife"""
# Spawn fig and axis for plotting
if not self.kwargs["cyclelife_separate_data"]:
fig, ax = self.give_fig()
if self.kwargs["cyclelife_coulombic_efficiency"]:
# Spawn twinx axis and set label
ax_ce = ax.twinx()
ax_ce.set(ylabel=self.kwargs["cyclelife_coulombic_efficiency_ylabel"])
if (
self.kwargs["cyclelife_charge_c_rate"]
or self.kwargs["cyclelife_discharge_c_rate"]
):
ax_c_rate = ax.twinx()
def format_label(x, pos):
# The commented out code here makes the fractioned C-rate like C/50 and so on.
"""
if x >= 1:
s = '%.2gC' % x
elif x == 0:
s = r'C/$\infty$'
else:
newfloat = 1/x
s = 'C/%.2g' % newfloat
"""
# The following just has decimal place C-rate.
s = "%.3gC" % x
return s
ax_c_rate.yaxis.set_major_formatter(FuncFormatter(format_label))
ax_c_rate.set(ylabel="Effective C-rate")
if self.kwargs["cyclelife_ir"]:
ax_ir = ax.twinx()
outpath = self.outpath
for cpobj, cyc_nums, color, filename in self.file_data:
if self.kwargs["cyclelife_separate_data"]:
fig, ax = self.give_fig()
if self.kwargs["cyclelife_coulombic_efficiency"]:
# Spawn twinx axis and set label
ax_ce = ax.twinx()
ax_ce.set(
ylabel=self.kwargs["cyclelife_coulombic_efficiency_ylabel"]
)
if (
self.kwargs["cyclelife_charge_c_rate"]
or self.kwargs["cyclelife_discharge_c_rate"]
):
ax_c_rate = ax.twinx()
def format_label(x, pos):
# The following just has decimal place C-rate.
s = "%.3gC" % x
return s
ax_c_rate.yaxis.set_major_formatter(FuncFormatter(format_label))
ax_c_rate.set(ylabel="Effective C-rate")
if self.kwargs["cyclelife_ir"]:
ax_ir = ax.twinx()
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(
method="forth-and-forth",
label_cycle_number=True,
categorical_column=True,
)
outpath += os.path.basename(filename).split(".")[0] + "_"
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
chgs = [[], []] # List with cycle num and capacity
dchgs = [[], []]
# Accumulate cycles
for cyc in keys: # Loop over all cycles
if cyc in cyc_nums: # Check if it is in list of wanted cycles
cyc_df = cycgrouped.get_group(
cyc
) # Get the group of datapoints from specific cycle
cyc_redox_grouped = cyc_df.groupby(
"direction"
) # Group by direction (meaning if it is charging or discharging)
dchg_df = cyc_redox_grouped.get_group(
-1
) # Data for the discharge curve
dchgs[0].append(cyc) # Append to dchg list
dchgs[1].append(dchg_df["capacity"].iat[-2])
chg_df = cyc_redox_grouped.get_group(1) # Data for the charge curve
chgs[0].append(cyc) # Append to chg list
chgs[1].append(chg_df["capacity"].iat[-2])
if self.kwargs[
"cyclelife_percentage"
]: # Normalize all datapoints on the first one
norm_fact = (
dchgs[1][0] / 100
) # /100 is to get range from 0-100(%) instead of 0-1
for i in range(len(chgs[1])):
chgs[1][i] /= norm_fact
for i in range(len(dchgs[1])):
dchgs[1][i] /= norm_fact
# Make label from filename or nickname
if self.nicknames:
label = self.nicknames[self.files.index(filename)]
else:
label = str(os.path.basename(filename))
# print("Discharge capacities:")
# print(dchgs[1])
# Actually place it in the plot
if not self.kwargs["only_dischg"] and not self.kwargs["only_chg"]:
ax.scatter(
chgs[0], chgs[1], c=color, alpha=0.2,
)
ax.scatter(dchgs[0], dchgs[1], c=color, label=label)
elif self.kwargs["only_dischg"]:
ax.scatter(dchgs[0], dchgs[1], c=color, label=label)
elif self.kwargs["only_chg"]:
ax.scatter(
chgs[0], chgs[1], c=color, alpha=0.2,
)
if self.kwargs["cyclelife_coulombic_efficiency"]:
# Get CE for cyc_nums
coulombic_efficiency = cpobj.cell.summary[
"coulombic_efficiency_u_percentage"
]
cycs = []
CEs = []
for cyc in keys:
if cyc in cyc_nums:
cycs.append(cyc)
CEs.append(coulombic_efficiency[cyc])
# Place it in the plot
ax_ce.scatter(cycs, CEs, c=color, marker="+")
# print(filename + " Dchg 1-3: " + str(dchgs[1][0:3]) + ", CE 1-3: " + str(coulombic_efficiency[0:3]))
if (
self.kwargs["cyclelife_charge_c_rate"]
or self.kwargs["cyclelife_discharge_c_rate"]
):
# charge_c_rate = cpobj.cell.summary["charge_c_rate"] #This gives incorrect c-rates.
stepstable = cpobj.cell.steps
chg_c_rates, dchg_c_rates = get_effective_C_rates(stepstable)
selected_chg_c_rates = []
selected_dchg_c_rates = []
selected_cycs = []
for cyc in keys:
if cyc in cyc_nums:
selected_chg_c_rates.append(chg_c_rates[cyc - 1])
selected_dchg_c_rates.append(dchg_c_rates[cyc - 1])
selected_cycs.append(cyc)
if (
self.kwargs["cyclelife_charge_c_rate"]
and not self.kwargs["cyclelife_discharge_c_rate"]
):
ax_c_rate.scatter(
selected_cycs, selected_chg_c_rates, c=color, marker="_"
)
elif (
not self.kwargs["cyclelife_charge_c_rate"]
and self.kwargs["cyclelife_discharge_c_rate"]
):
ax_c_rate.scatter(
selected_cycs, selected_dchg_c_rates, c=color, marker="_"
)
elif (
self.kwargs["cyclelife_charge_c_rate"]
and self.kwargs["cyclelife_discharge_c_rate"]
):
ax_c_rate.scatter(
selected_cycs, selected_chg_c_rates, c=color, marker="_"
)
ax_c_rate.scatter(
selected_cycs,
selected_dchg_c_rates,
c=color,
alpha=0.2,
marker="_",
)
if self.kwargs["cyclelife_degradation_slope"]:
from scipy.stats import linregress
slope, intercept, r, p, se = linregress(dchgs[0], dchgs[1])
x = np.linspace(0, ax.get_xlim()[1] * 0.9, 10)
degradation_unit = (
r" $\frac{mAh}{g\cdot cycle}$"
if not self.kwargs["cyclelife_percentage"]
else r" $\frac{\%}{cycle}$"
)
intercept_unit = (
r" $\frac{mAh}{g}$"
if not self.kwargs["cyclelife_percentage"]
else r"%"
)
ax.plot(
x,
x * slope + intercept,
c=color,
label="Degradation: %g" % slope
+ degradation_unit
+ "\nIntercept: %g" % intercept
+ intercept_unit
+ ", r=%g" % r,
)
"""if self.kwargs["cyclelife_ir"]:
chg_ir = []
dchg_ir = []
steptable = cpobj.steps
print(steptable)
newdf = steptable[["ir", "cycle", "type"]]
for i,elem in enumerate(newdf.iterrows()):
if elem[1]["type"] == "charge":
chg_ir.append(elem[1]["ir"])
elif elem[1]["type"] == "discharge":
dchg_ir.append(elem[1]["ir"])
print(chg_ir)
for cyc in keys:
if cyc in cyc_nums:
ax_ir.scatter(cyc, chg_ir[cyc], c = color, marker = "*")
"""
if self.kwargs["cyclelife_separate_data"]:
# Set all plot settings from Plot object
self.fix_cyclelife(fig, ax)
# Save fig
savepath = outpath.strip("_") + "_Cyclelife"
self.save_fig(fig, savepath)
if not self.kwargs["cyclelife_separate_data"]:
# Set all plot settings from Plot object
self.fix_cyclelife(fig, ax)
# Save fig
savepath = outpath.strip("_") + "_Cyclelife"
self.save_fig(fig, savepath)
def plot_gc(self):
"""Takes all the parameters inserted in the object creation and plots Voltage-Capacity curves"""
if self.kwargs["all_in_one"]: # Everything goes in the same figure.
fig, ax = self.give_fig()
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
] * 5
savepath = self.outpath
colorbar_incrementor = -1
for cpobj, cyc_nums, color, filename in self.file_data:
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(
method="forth-and-forth",
label_cycle_number=True,
categorical_column=True,
)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Make label from filename or nickname
if self.nicknames:
label = str(self.nicknames[self.files.index(filename)])
else:
label = str(os.path.basename(filename))
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
cbaxes = fig.add_axes(
[1.05 + colorbar_incrementor / 8, 0.1, 0.03, 0.8]
)
colorbar_incrementor += 1
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cbaxes,
label="Cycle number for "
+ os.path.basename(filename).split(".")[0],
)
# fig.colorbar.ax.yaxis.get_major_locator().set_params(integer=True) #TODO fix such that we don't have decimals on the cycle colorbar!!
# Plot cycles
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
if (
not self.kwargs["only_dischg"]
and not self.kwargs["only_chg"]
):
pass
elif self.kwargs["only_dischg"]:
dchg = cyc_df.groupby("direction")
cyc_df = dchg.get_group(-1)
elif self.kwargs["only_chg"]:
chg = cyc_df.groupby("direction")
cyc_df = chg.get_group(1)
# TODO: The way this is set up, when plotting both discharge and charge, the whole cycle is normalized on the maximum capacity, meaning the charge can be normalized on the discharge or the other way around.
if self.kwargs["galvanostatic_normalize_capacity"]:
# Then we normalize capacity column on the max value (since this should be max cap)
maxcap = cyc_df["capacity"].max()
cyc_df["capacity"] = cyc_df["capacity"].div(maxcap)
ax.set_xlabel("Normalized Capacity")
ax.plot(
cyc_df["capacity"],
cyc_df["voltage"],
label=label + ", Cyc " + str(cyc),
c=cyccolor,
)
savepath += os.path.basename(filename).split(".")[0]
fig.suptitle("Galvanostatic cyclingdata")
self.fix_gc(fig, ax)
# Save fig
savepath += "_GC-plot"
self.save_fig(fig, savepath)
else: # Then each data goes in its own figure
for cpobj, cyc_nums, color, filename in self.file_data:
fig, ax = self.give_fig()
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(
method="forth-and-forth",
label_cycle_number=True,
categorical_column=True,
)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap), label="Cycle"
)
# fig.colorbar.ax.yaxis.get_major_locator().set_params(integer=True) #TODO fix such that we don't have decimals on the cycle colorbar!!
# Make label from filename or nickname
if self.nicknames:
label = str(self.nicknames[self.files.index(filename)])
else:
label = str(os.path.basename(filename))
# Plot cycles
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
]
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
# TODO: This if/elif block is pretty much the same as the one above (for the all-in-one plot), can it be reused instead of being written twice?
if (
not self.kwargs["only_dischg"]
and not self.kwargs["only_chg"]
):
pass
elif self.kwargs["only_dischg"]:
dchg = cyc_df.groupby("direction")
cyc_df = dchg.get_group(-1)
elif self.kwargs["only_chg"]:
chg = cyc_df.groupby("direction")
cyc_df = chg.get_group(1)
# TODO: The way this is set up, when plotting both discharge and charge, the whole cycle is normalized on the maximum capacity, meaning the charge can be normalized on the discharge or the other way around.
if self.kwargs["galvanostatic_normalize_capacity"]:
# Then we normalize capacity column on the max value (since this should be max cap)
maxcap = cyc_df["capacity"].max()
cyc_df["capacity"] = cyc_df["capacity"].div(maxcap)
ax.set_xlabel("Normalized Capacity")
ax.plot(
cyc_df["capacity"],
cyc_df["voltage"],
label=label.split(".")[0] + ", Cyc " + str(cyc),
c=cyccolor,
)
# Set all plot settings from Plot object
fig.suptitle(label)
self.fix_gc(fig, ax)
# Save fig
savepath = (
self.outpath + os.path.basename(filename).split(".")[0] + "_GC-plot"
)
self.save_fig(fig, savepath)
def plot_dQdV(self):
"""Takes all the parameters inserted in the object creation and plots dQdV"""
from cellpy.utils import ica
if self.kwargs["all_in_one"]: # Everything goes in the same figure.
fig, ax = self.give_fig()
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
] * 5
savepath = self.outpath
colorbar_incrementor = -1
for cpobj, cyc_nums, color, filename in self.file_data:
# Get Pandas DataFrame of dQdV
if self.kwargs["only_dischg"]:
_, df = ica.dqdv_frames(cpobj, split=True)
elif self.kwargs["only_chg"]:
df, _ = ica.dqdv_frames(cpobj, split=True)
else:
df = ica.dqdv_frames(cpobj)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
cbaxes = fig.add_axes(
[1.05 + colorbar_incrementor / 8, 0.1, 0.03, 0.8]
)
colorbar_incrementor += 1
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cbaxes,
label="Cycle number for "
+ os.path.basename(filename).split(".")[0],
)
# fig.colorbar.ax.yaxis.get_major_locator().set_params(integer=True) #TODO fix such that we don't have decimals on the cycle colorbar!!
# Plot cycles
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
ax.plot(
cyc_df["voltage"],
cyc_df["dq"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
savepath += os.path.basename(filename).split(".")[0]
fig.suptitle("dQdV")
self.fix_dqdv(fig, ax)
# Save fig
savepath += "_dQdV-plot"
self.save_fig(fig, savepath)
else:
for cpobj, cyc_nums, color, filename in self.file_data:
fig, ax = self.give_fig()
# Get Pandas DataFrame of dQdV
if self.kwargs["only_dischg"]:
_, df = ica.dqdv_frames(cpobj, split=True)
elif self.kwargs["only_chg"]:
df, _ = ica.dqdv_frames(cpobj, split=True)
else:
df = ica.dqdv_frames(cpobj)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Create the plot obj
fig, ax = plt.subplots(figsize=(6, 4))
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap), label="Cycle"
)
# fig.colorbar.ax.yaxis.get_major_locator().set_params(integer=True) #TODO fix such that we don't have decimals on the cycle colorbar!!
# Plot cycles
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
]
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
ax.plot(
cyc_df["voltage"],
cyc_df["dq"],
label="Cycle " + str(cyc),
c=cyccolor,
)
# Set all plot settings from Plot object
fig.suptitle(os.path.basename(filename))
self.fix_dqdv(fig, ax)
# Save fig
savepath = (
self.outpath
+ os.path.basename(filename).split(".")[0]
+ "_dQdV-plot"
)
self.save_fig(fig, savepath)
def plot_gc_and_dQdV(self):
"""Takes all the parameters inserted in the object creation and plots Voltage-Curves and dQdV data together"""
from cellpy.utils import ica
if self.kwargs["all_in_one"]: # Everything goes in the same figure.
fig, ax = self.give_fig()
fig.delaxes(ax)
ax1, ax2 = fig.subplots(1, 2, sharey=True)
fig.set_size_inches(8, 4)
fig.subplots_adjust(wspace=0)
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
] * 5
savepath = self.outpath
colorbar_incrementor = -1
for cpobj, cyc_nums, color, filename in self.file_data:
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(
method="forth-and-forth",
label_cycle_number=True,
categorical_column=True,
)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
cbaxes = fig.add_axes(
[1.05 + colorbar_incrementor / 8, 0.1, 0.03, 0.8]
)
colorbar_incrementor += 1
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
cax=cbaxes,
label="Cycle number for "
+ os.path.basename(filename).split(".")[0],
pad=0.2,
)
# Plot GC in leftmost plot (ax)
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
if (
not self.kwargs["only_dischg"]
and not self.kwargs["only_chg"]
):
ax1.plot(
cyc_df["capacity"],
cyc_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
elif self.kwargs["only_dischg"]:
dchg = cyc_df.groupby("direction")
dchg_df = dchg.get_group(-1)
ax1.plot(
dchg_df["capacity"],
dchg_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
elif self.kwargs["only_chg"]:
chg = cyc_df.groupby("direction")
chg_df = chg.get_group(1)
ax1.plot(
chg_df["capacity"],
chg_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
# Get Pandas DataFrame for dQdV
if self.kwargs["only_dischg"]:
_, df = ica.dqdv_frames(cpobj, split=True)
elif self.kwargs["only_chg"]:
df, _ = ica.dqdv_frames(cpobj, split=True)
else:
df = ica.dqdv_frames(cpobj)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Plot cycles
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
ax2.plot(
cyc_df["dq"],
cyc_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
savepath += os.path.basename(filename).split(".")[0]
# Set all plot settings from Plot object
fig.suptitle("GC and dQdV")
self.fix_gc_and_dqdv(fig, [ax1, ax2])
# Save fig
savepath = savepath + "_GC-dQdV-plot"
self.save_fig(fig, savepath)
else: # Then all files are placed in separate plots
for cpobj, cyc_nums, color, filename in self.file_data:
fig, ax = self.give_fig()
fig.delaxes(ax)
ax1, ax2 = fig.subplots(1, 2, sharey=True)
fig.set_size_inches(8, 4)
fig.subplots_adjust(wspace=0)
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
] * 5
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(
method="forth-and-forth",
label_cycle_number=True,
categorical_column=True,
)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Fix colorbar or cycle colors
if self.kwargs["specific_cycles"] == None: # Plot all cycles
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"name", [color, "black"], N=256, gamma=1.0
)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
fig.colorbar(
mpl.cm.ScalarMappable(norm=norm, cmap=cmap), label="Cycle"
)
# Plot GC in leftmost plot (ax)
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
if (
not self.kwargs["only_dischg"]
and not self.kwargs["only_chg"]
):
ax1.plot(
cyc_df["capacity"],
cyc_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
elif self.kwargs["only_dischg"]:
dchg = cyc_df.groupby("direction")
dchg_df = dchg.get_group(-1)
ax1.plot(
dchg_df["capacity"],
dchg_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
elif self.kwargs["only_chg"]:
chg = cyc_df.groupby("direction")
chg_df = chg.get_group(1)
ax1.plot(
chg_df["capacity"],
chg_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
# Get Pandas DataFrame for dQdV
if self.kwargs["only_dischg"]:
_, df = ica.dqdv_frames(cpobj, split=True)
elif self.kwargs["only_chg"]:
df, _ = ica.dqdv_frames(cpobj, split=True)
else:
df = ica.dqdv_frames(cpobj)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Plot cycles
for cyc in keys:
if cyc in cyc_nums:
if self.kwargs["specific_cycles"]:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc / keys[-1])
cyc_df = cycgrouped.get_group(cyc)
ax2.plot(
cyc_df["dq"],
cyc_df["voltage"],
label=os.path.basename(filename).split(".")[0]
+ ", Cyc "
+ str(cyc),
c=cyccolor,
)
# Set all plot settings from Plot object
fig.suptitle(os.path.basename(filename))
self.fix_gc_and_dqdv(fig, [ax1, ax2])
# Save fig
savepath = (
self.outpath
+ os.path.basename(filename).split(".")[0]
+ "_GC-dQdV-plot"
)
self.save_fig(fig, savepath)
"""# Fix colorbar or cycle colors
if not specific_cycles: # If this is none, then plot all!
# Set up colormap and add colorbar
cmap = mpl.colors.LinearSegmentedColormap.from_list("name", [color, "black"], N=256, gamma=1.0)
norm = mpl.colors.Normalize(vmin=cyc_nums[0], vmax=cyc_nums[-1])
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),label='Cycle')
## Plot GC on the left subplot (ax[0]) ##
# Get Pandas DataFrame of pot vs cap from cellpy object
df = cpobj.get_cap(method="forth-and-forth", label_cycle_number=True, categorical_column=True)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Plot cycles
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan' ]
for cyc in keys:
if cyc in cyc_nums:
if specific_cycles:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc/keys[-1])
cyc_df = cycgrouped.get_group(cyc)
axs[0].plot(cyc_df["capacity"], cyc_df["voltage"], label="Cycle " + str(cyc), c = cyccolor)
## Plot dQdV on the right subplot (ax[1]) ##
from cellpy.utils import ica
# Get Pandas DataFrame of pot vs cap from cellpy object
df = ica.dqdv_frames(cpobj)
# Group by cycle and make list of cycle numbers
cycgrouped = df.groupby("cycle")
keys = []
for key, item in cycgrouped:
keys.append(key)
# Plot cycles
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan' ]
for cyc in keys:
if cyc in cyc_nums:
if specific_cycles:
cyccolor = colors[0]
colors = colors[1:]
else:
cyccolor = cmap(cyc/keys[-1])
cyc_df = cycgrouped.get_group(cyc)
axs[1].plot(cyc_df["dq"], cyc_df["voltage"], label=str(cyc), c = cyccolor)
# Set all plot settings from Plot object
fig.suptitle(os.path.basename(file))
self.fix_gc_and_dqdv(fig, axs)
# Save fig
savepath = self.outpath + os.path.basename(file).split(".")[0] + "_GC-dQdV-plot"
print("Saving to: " + savepath)
fig.savefig(savepath, bbox_inches='tight')"""
def plot_cap_from_rc(self):
"""Takes all the parameters inserted in the object creation and plots capacity VS inverse c-rate"""
# Spawn fig and axis for plotting
fig, ax = self.give_fig()
# Get labels and handles for legend generation and eventual savefile
handles, labels = ax.get_legend_handles_labels()
# handles.append(Line2D([0], [0], marker='o', color='black', alpha = 0.2, label = 'Charge capacity', linestyle=''))
# handles.append(Line2D([0], [0], marker='o', color='black', alpha = 0.2, label = 'Disharge capacity', linestyle=''))
# handles.append(Line2D([0], [0], marker='+', color='black', label = 'Cap avg per C-rate', linestyle=''))
outpath = self.outpath
for cpobj, cyc_nums, color, filename in self.file_data:
# Get Pandas DataFrame of pot vs cap from cellpy object
# df = cpobj.get_cap(method="forth-and-forth", label_cycle_number=True, categorical_column=True)
outpath += os.path.basename(filename).split(".")[0] + "_"
handles.append(
Line2D([0], [0], marker="o", color=color, label=filename, linestyle="")
)
stepstable = cpobj.cell.steps
chglist, dchglist = get_effective_C_rates_and_caps(stepstable)
# Remove all cycles which are not in cyc_nums by looking at the 0th element (cyc num) of every sublist in chglist
new_chglist = [x for x in chglist if x[0] in cyc_nums]
new_dchglist = [x for x in dchglist if x[0] in cyc_nums]
linregress_xlist = []
linregress_ylist = []
for chg, dchg in zip(new_chglist, new_dchglist):
# print(dchg)
# ax.scatter(chg[1] , chg[2] , color = color, alpha = 0.2)
ax.scatter(1 / dchg[1], dchg[2], color=color, alpha=1)
linregress_xlist.append(1 / dchg[1])
linregress_ylist.append(dchg[2])
# print(linregress_ylist)
# Fitting curve to the exponential function
# Import curve fitting package from scipy
# from scipy.optimize import curve_fit
x_arr = np.array(linregress_xlist)
y_arr = np.array(linregress_ylist)
# Average the capacity for each c-rate
def _reduce_to_averages(xvals, yvals):
"""This function scans through the data and averages relevant points together."""
point_grouped = []
point_lst = []
dists = []
for i in range(1, len(xvals)):
prev_point = np.array((xvals[i - 1], yvals[i - 1]))
curr_point = np.array((xvals[i], yvals[i]))
dev = 0.3
if (
(prev_point * (1 - dev))[0]
< curr_point[0]
< (prev_point * (1 + dev))[0]
):
# If this point is within dev (a percentage-like tolerance) of the last point, then it's in the same c-rate
point_lst.append(curr_point)
else:
# New c-rate
point_grouped.append(point_lst)
point_lst = []
print(point_grouped)
x_arr = []
y_arr = []
for group in point_grouped:
stacked_arr = np.stack(group, axis=1)
averaged_arr = np.average(stacked_arr, axis=1)
x_arr.append(averaged_arr[0])
y_arr.append(averaged_arr[1])
print(x_arr)
print(y_arr)
return x_arr, y_arr
# x_arr, y_arr = _reduce_to_averages(x_arr, y_arr)
# ax.scatter(x_arr, y_arr, marker="+")
# def _exp_func(x,a,b,c):
# return -a* (b**x) + a + -a * (b**(x+c)) +a
# pars, cov = curve_fit(f=_exp_func, p0 = [50, 0.7, 0], xdata = x_arr, ydata=y_arr, bounds = ([0,0.1, -20],[1e9, 1, 20]))
# x_vals = np.linspace(min(x_arr), max(x_arr), 100) #x_arr[0], x_arr[-1], 100)
# ax.plot(x_vals, _exp_func(x_vals, *pars))
# ax.hlines(max(y_arr), ax.get_xlim()[0], ax.get_xlim()[1], colors = color, linestyle='--')
# Get the standard deviations of the parameters (square roots of the # diagonal of the covariance)
# std_dev = np.sqrt(np.diag(cov))
# Make a sweet legend to put on this
# handles.append(
# Line2D(
# [0], [0],
# marker="_", color=color,
# label = 'Calculated maximum capacity:' + '\n' +'{:.2e} $\pm$ {:.2e}'.format(pars[0], std_dev[0]) + r'$\left[\mu Ah\right]$', linestyle=''
# ))
ax.hlines(
max(y_arr),
ax.get_xlim()[0],
ax.get_xlim()[1],
colors=color,
linestyle="--",
)
handles.append(
Line2D(
[0],
[0],
marker="_",
color=color,
label="Highest capacity:"
+ "\n"
+ "{:.2e}".format(max(y_arr))
+ r"$\left[\mu Ah\right]$",
linestyle="",
)
)
self.fix_cap_from_rc(fig, ax, handles)
# Save fig
savepath = outpath + "CapDet"
self.save_fig(fig, savepath)
def fix_cyclelife(self, fig, ax):
"""Makes the finishing touches to the cyclelife plot"""
# Applies kwargs settings and other plot settings
## Parameters which could be user defined later
"""
ax.set(
xticks = (np.arange(0, 150), step=20)),
yticks = (np.arange(3, 5, step=0.2)),
)
"""
# Get labels and handles for legend generation and eventual savefile
handles, labels = ax.get_legend_handles_labels()
if not self.kwargs["only_dischg"]:
handles.append(
Line2D(
[0],
[0],
marker="o",
color="black",
alpha=0.2,
label="Charge capacity",
linestyle="",
)
)
if self.kwargs["cyclelife_coulombic_efficiency"]:
handles.append(
Line2D(
[0],
[0],
marker="+",
color="black",
alpha=1,
label="Coulombic Efficiency",
linestyle="",
)
)
if (
self.kwargs["cyclelife_charge_c_rate"]
and not self.kwargs["cyclelife_discharge_c_rate"]
):
handles.append(
Line2D(
[0],
[0],
marker="_",
color="black",
alpha=1,
label="Effective charge C-rate",
linestyle="",
)
)
elif (
not self.kwargs["cyclelife_charge_c_rate"]
and self.kwargs["cyclelife_discharge_c_rate"]
):
handles.append(
Line2D(
[0],
[0],
marker="_",
color="black",
alpha=1,
label="Effective discharge C-rate",
linestyle="",
)
)
elif (
self.kwargs["cyclelife_charge_c_rate"]
and self.kwargs["cyclelife_discharge_c_rate"]
):
handles.append(
Line2D(
[0],
[0],
marker="_",
color="black",
alpha=1,
label="Effective charge C-rate",
linestyle="",
)
)
handles.append(
Line2D(
[0],
[0],
marker="_",
color="black",
alpha=0.2,
label="Effective discharge C-rate",
linestyle="",
)
)
# The params below should always be like this.
ax.tick_params(direction="in", top="true", right="true")
ax.xaxis.get_major_locator().set_params(integer=True)
# Apply all kwargs to plot
try:
# Cyclelife plot details
ax.set(xlabel=self.kwargs["cyclelife_xlabel"])
if self.kwargs["cyclelife_percentage"]:
ax.set(ylabel=self.kwargs["cyclelife_ylabel_percent"])
else:
ax.set(ylabel=self.kwargs["cyclelife_ylabel"])
# General plot details
fig.set_size_inches(self.kwargs["figsize"])
if type(self.kwargs["figtitle"]) == str:
fig.suptitle(self.kwargs["figtitle"])
else:
fig.suptitle("Capacity versus Cycle life")
except Exception as e:
logging.error(e)
# Take care of having the legend outside the plot
if self.kwargs["cyclelife_legend_outside"]:
if (
self.kwargs["cyclelife_coulombic_efficiency"]
or self.kwargs["cyclelife_charge_c_rate"]
or self.kwargs["cyclelife_discharge_c_rate"]
):
ax.legend(handles=handles, bbox_to_anchor=(1.18, 1), loc="upper left")
else:
ax.legend(handles=handles, bbox_to_anchor=(1.05, 1), loc="upper left")
figsize = self.kwargs["figsize"]
fig.set_size_inches((figsize[0] + 3, figsize[1]))
else:
ax.legend(handles=handles)
fig.tight_layout() # Needed to not clip ylabel on coulombic efficiency
def fix_cap_from_rc(self, fig, ax, handles):
"""Makes the finishing touches to the capacity vs inverse C-rate plot"""
ax.tick_params(direction="in", top="true", right="true")
ax.set(
xlabel=r"Inverse C-rate $\left[ h \right]$",
ylabel=r"Capacity $\left[\mu Ah \right]$",
)
# General plot details
fig.set_size_inches(self.kwargs["figsize"])
if type(self.kwargs["figtitle"]) == str:
fig.suptitle(self.kwargs["figtitle"])
else:
fig.suptitle("Capacity determination from Rate Capability")
# Take care of having the legend outside the plot
if self.kwargs["cyclelife_legend_outside"]:
ax.legend(handles=handles, bbox_to_anchor=(1.05, 1), loc="upper left")
figsize = self.kwargs["figsize"]
fig.set_size_inches((figsize[0] + 3, figsize[1]))
else:
ax.legend(handles=handles)
fig.tight_layout() # Needed to not clip ylabel on coulombic efficiency
def fix_gc(self, fig, ax):
"""Makes the finishing touches to the voltage-curves plot"""
# Applies kwargs settings and other plot settings
## Parameters which could be user defined later
"""
ax.set(
xticks = (np.arange(0, 150), step=20)),
yticks = (np.arange(3, 5, step=0.2)),
)
"""
# The params below should always be like this.
ax.tick_params(direction="in", top="true", right="true")
# Apply all kwargs to plot
try:
# Galvanostatic plot details
ax.set(xlabel=self.kwargs["galvanostatic_xlabel"])
ax.set(ylabel=self.kwargs["galvanostatic_ylabel"])
ax.set(ylim=self.kwargs["galvanostatic_potlim"])
ax.set(xlim=self.kwargs["galvanostatic_caplim"])
if self.kwargs["specific_cycles"] != None:
ax.legend()
# General plot details
fig.set_size_inches(self.kwargs["figsize"])
if type(self.kwargs["figtitle"]) == str:
fig.suptitle(self.kwargs["figtitle"])
except Exception as e:
logging.error(e)
def fix_dqdv(self, fig, ax):
"""Makes the finishing touches to the dQdV plot"""
# Applies kwargs settings and other plot settings
## Parameters which could be user defined later
"""
ax.set(
xticks = (np.arange(0, 150), step=20)),
yticks = (np.arange(3, 5, step=0.2)),
)
"""
# The params below should always be like this.
ax.tick_params(direction="in", top="true", right="true")
# Apply all kwargs to plot
try:
# Cyclelife plot details
ax.set(xlabel=self.kwargs["dqdv_xlabel"])
ax.set(ylabel=self.kwargs["dqdv_ylabel"])
ax.set(ylim=self.kwargs["dqdv_dqlim"])
ax.set(xlim=self.kwargs["dqdv_potlim"])
if self.kwargs["specific_cycles"] != None:
ax.legend()
# General plot details
fig.set_size_inches(self.kwargs["figsize"])
if type(self.kwargs["figtitle"]) == str:
fig.suptitle(self.kwargs["figtitle"])
except Exception as e:
logging.error(e)
def fix_gc_and_dqdv(self, fig, axs):
"""Makes the finishing touches to the dQdV / Voltage curves plot"""
for ax in axs:
# The params below should always be like this.
ax.tick_params(direction="in", top="true", right="true")
# Apply all kwargs to plot
try:
# dQdV plot details
axs[1].set(
xlabel=self.kwargs["dqdv_ylabel"]
) # switched x and y label since this dQdV plot is flipped to match the adjacent gc plot
axs[1].set(ylabel="") # Empty since we already have potential on gc axs
axs[1].set(ylim=self.kwargs["galvanostatic_potlim"])
axs[1].set(xlim=self.kwargs["dqdv_dqlim"])
# Galvanostatic plot details
axs[0].set(xlabel=self.kwargs["galvanostatic_xlabel"])
axs[0].set(ylabel=self.kwargs["galvanostatic_ylabel"])
axs[0].set(ylim=self.kwargs["galvanostatic_potlim"])
axs[0].set(xlim=self.kwargs["galvanostatic_caplim"])
if self.kwargs["specific_cycles"] != None:
axs[0].legend()
# General plot details
fig.set_size_inches(self.kwargs["figsize"])
if type(self.kwargs["figtitle"]) == str:
fig.suptitle(self.kwargs["figtitle"])
except Exception as e:
print(e)
logging.error(e)
def save_fig(self, fig, savepath):
"""The point of this is to have savefig parameters the same across
all plots (for now just fig dpi and bbox inches)"""
if self.kwargs.get("save_figures", True):
if self.kwargs["outname"]:
savepath = (
self.kwargs["outpath"]
+ self.kwargs["outname"]
+ self.kwargs["outtype"]
)
else:
savepath += self.kwargs["outtype"]
print("Saving to: " + savepath)
fig.savefig(savepath, bbox_inches="tight", dpi=self.kwargs["figres"])
def get_effective_C_rates(steptable):
newdf = steptable[["step_time_avr", "cycle", "type"]]
chg_c_rates = []
dchg_c_rates = []
for i, elem in enumerate(newdf.iterrows()):
if elem[1]["type"] == "charge":
chg_c_rates.append(1 / (elem[1]["step_time_avr"] / 3600))
elif elem[1]["type"] == "discharge":
dchg_c_rates.append(1 / (elem[1]["step_time_avr"] / 3600))
return chg_c_rates, dchg_c_rates
def get_effective_C_rates_and_caps(steptable):
newdf = steptable[
["step_time_avr", "cycle", "type", "charge_avr", "discharge_last"]
]
chglist = (
[]
) # [[cycle, chg_crate, chg_cap], [cycle increase with crates and capacities for this cycle]]
dchglist = []
for i, elem in enumerate(newdf.iterrows()):
cyc = elem[1]["cycle"]
if elem[1]["type"] == "charge":
chglist.append(
[
cyc,
1 / (elem[1]["step_time_avr"] / 3600),
elem[1]["charge_avr"] * 1000,
]
)
elif elem[1]["type"] == "discharge":
dchglist.append(
[
cyc,
1 / (elem[1]["step_time_avr"] / 3600),
elem[1]["discharge_last"] * 1000 * 1000,
]
)
return chglist, dchglist
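# Illustrative note (added, not part of the original module): the effective C-rate
# above is simply 1 / (average step time in hours), so a charge step averaging
# 7200 s gives 1 / (7200 / 3600) = 0.5, i.e. roughly a C/2 rate, and the raw
# capacities from the steps table are rescaled by the *1000 (charge) and
# *1000*1000 (discharge) factors before plotting.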
def main():
log.setup_logging(default_level="DEBUG")
f1 = Path("../../testdata/data/20160805_test001_45_cc_01.res")
f2 = Path("../../testdata/data/20160805_test001_47_cc_01.res")
raw_files = [f1, f2]
nicknames = ["cell1", "cell2"]
logging.debug(raw_files)
logging.debug(nicknames)
ezplt = EasyPlot(raw_files, nicknames, figtitle="Test1", save_figures=True)
ezplt.plot()
plt.show()
return
def _dev_journal_loading():
log.setup_logging(default_level="DEBUG")
journal_file = Path("../../testdata/db/cellpy_batch_test.json")
ezplt = EasyPlot(
None,
journal=journal_file,
figtitle="Test1",
save_figures=False,
save_journal=True,
outpath="./tmp/",
)
ezplt.plot()
# plt.show()
return
if __name__ == "__main__":
print(" running easyplot ".center(80, "-"))
_dev_journal_loading()
print(" finished ".center(80, "-"))
```
#### File: cellpy/tests/test_maccor.py
```python
import tempfile
import shutil
import pytest
import logging
from cellpy import log, get
from . import fdv
log.setup_logging(default_level=logging.DEBUG)
@pytest.fixture
def cellpy_data_instance():
from cellpy import cellreader
return cellreader.CellpyData()
@pytest.fixture
def dataset():
from cellpy import cellreader
d = cellreader.CellpyData()
d.load(fdv.mpr_cellpy_file_path)
return d
def test_set_instrument(cellpy_data_instance):
import os
instrument = "maccor_txt"
cellpy_data_instance.set_instrument(instrument=instrument)
cellpy_data_instance.from_raw(fdv.mcc_file_path, sep="\t")
cellpy_data_instance.make_step_table()
cellpy_data_instance.make_summary()
assert len(cellpy_data_instance.cell.raw) == 6704
temp_dir = tempfile.mkdtemp()
logging.debug(f"created a temporary directory and dumping csv there ({temp_dir})")
cellpy_data_instance.to_csv(datadir=temp_dir)
assert len(os.listdir(temp_dir)) > 0
shutil.rmtree(temp_dir)
def test_cellpy_get(cellpy_data_instance):
instrument = "maccor_txt"
c = get(fdv.mcc_file_path, instrument=instrument, sep="\t")
assert len(c.cell.raw) == 6704
```
|
{
"source": "jepegit/coffeshop",
"score": 2
}
|
#### File: jepegit/coffeshop/test_schedules.py
```python
import pytest
import schedule_tester
def test_open_schedule():
print(schedule_tester)
def
```
|
{
"source": "jepemo/basic-agent",
"score": 2
}
|
#### File: basic-agent/bagent/core.py
```python
import asyncio
import logging
from contextlib import ContextDecorator
from bagent.messages import MessageContext
logger = logging.getLogger(__name__)
class AgentMixin(object):
def __init__(self, loop, debug):
self.next_pid = 1
self.parent_ctx = None
self.parent = None
self.parent_pid = '0'
self.agents = {}
self.tasks = {}
self.loop = loop
self.debug = debug
self.pid = '0'
self.messages = asyncio.Queue(loop=loop)
self.fn = None
self.args = None
self.kwargs = None
def create_agent(self, agent_fn, *args, **kwargs):
agent = AgentContext(self.loop, debug=self.debug)
pid = '{0}-{1}'.format(self.pid, self.next_pid)
agent.pid = pid
agent.parent = self
agent.parent_pid = self.pid
agent.fn = agent_fn
agent.args = args
agent.kwargs = kwargs
agent.parent_ctx = self
self.agents[pid] = agent
self.next_pid += 1
logger.debug("Agent created: {0}".format(pid))
return pid
async def recv(self):
logger.debug("{0} waiting for message...".format(self.pid))
return await self.messages.get()
def get_message(self):
return MessageContext(self)
async def send(self, pid, msg, sender=None):
if not sender:
sender = self.pid
logger.debug("From {0}, sending {1} to {2}".format(sender, str(msg), pid))
if self.pid == pid:
await self.messages.put((sender, msg))
else:
agent = self._find_child_path(pid)
if agent:
await agent.send(pid, msg, sender=sender)
else:
if self.pid == '0':
logger.warning("PID {0} not FOUND".format(pid))
else:
await self.parent.send(pid, msg, sender=sender)
def _find_child_path(self, pid):
for apid, agent in self.agents.items():
if apid.startswith(pid):
return agent
return None
class AgentContext(AgentMixin):
def __init__(self, loop, debug=False):
AgentMixin.__init__(self, loop, debug)
self.running_agents = 0
def _decr_children_count(self, fn):
async def func(*args, **kwargs):
await fn(*args, **kwargs)
self.running_agents -= 1
logger.debug("Agent terminated: {0}".format(self.pid))
return func
async def start(self, agent_fn, *args, **kwargs):
agent_fn = self._decr_children_count(agent_fn)
pid = self.create_agent(agent_fn, *args, **kwargs)
agent = self.agents[pid]
task = asyncio.ensure_future(agent.execute())
self.tasks[pid] = task
self.running_agents += 1
return pid
def _wait_children(self, fn):
async def func(*args, **kwargs):
await fn(*args, **kwargs)
while self.running_agents > 0:
await asyncio.sleep(1)
return func
async def execute(self):
logger.debug("Starting agent: {0}".format(self.pid))
self.fn = self._wait_children(self.fn)
await self.fn(self, *self.args, **self.kwargs)
class RootContext(ContextDecorator, AgentMixin):
def __init__(self, loop, debug=False):
ContextDecorator.__init__(self)
AgentMixin.__init__(self, loop, debug)
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
def start(self, agent_fn, *args, **kwargs):
pid = self.create_agent(agent_fn, *args, **kwargs)
return pid
def send(self):
raise Exception("Not implemented")
def recv(self):
raise Exception("Not implemented")
def __enter__(self):
return self
def __exit__(self, *exc):
tasks = [agent.execute() for agent in self.agents.values()]
self.loop.run_until_complete(asyncio.wait(tasks))
self.loop.close()
return False
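# Minimal usage sketch (an assumption based on the classes above, not part of the
# original file): agents are async callables that receive their context, and
# RootContext schedules every registered agent on the event loop when the `with`
# block exits.
#
# async def hello(ctx):
#     print("hello from agent", ctx.pid)
#
# if __name__ == "__main__":
#     loop = asyncio.get_event_loop()
#     with RootContext(loop, debug=True) as root:
#         root.start(hello)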
```
#### File: basic-agent/bagent/messages.py
```python
import re
class MessageHandler(object):
def __init__(self, msg, sender, ctx):
self.ctx = ctx
self.msg = msg
self.sender = sender
def is_int(self):
return self.is_type(int)
def is_str(self):
return self.is_type(str)
def is_float(self):
return self.is_type(float)
def is_re(self, expr):
if not self.is_type(str):
return False
else:
p = re.compile(expr)
return p.match(self.msg) is not None
def is_type(self, clazz):
return isinstance(self.msg, clazz)
def match_int(self, fn):
if self.is_int():
fn(self.msg)
def match_str(self, fn):
if self.is_str():
fn(self.msg)
def match_float(self, fn):
if self.is_float():
fn(self.msg)
def match_re(self, expr, fn):
if self.is_re(expr):
fn(self.msg)
def match(self, obj, fn):
for key, value in obj.items():
print(key, value)
def respond(self, resp_msg):
self.respond_to(self.sender, resp_msg)
def respond_to(self, resp_pid, resp_msg):
self.ctx.send(resp_pid, resp_msg)
class MessageContext:
def __init__(self, ctx):
self.ctx = ctx
def __enter__(self):
raise TypeError("Use async with instead")
def __exit__(self, exc_type, exc_val, exc_tb):
pass # pragma: no cover
async def __aenter__(self):
(sender, msg) = await self.ctx.recv()
return MessageHandler(msg, sender, self.ctx)
async def __aexit__(self, exc_type, exc_val, exc_tb):
return False
```
|
{
"source": "jepenven-silabs/connectedhomeip",
"score": 2
}
|
#### File: mdns/minimal/format_test.py
```python
import argparse
import asyncio
import coloredlogs
import logging
from dataclasses import dataclass
from construct import *
LOCAL_ADDR = ("0.0.0.0", 5388)
# LOCAL_ADDR = ("::%178", 5388)
DEST_ADDR = ("192.168.3.11", 5353)
# DEST_ADDR = ("fdf8:f53e:61e4::18", 5353)
QUERY_QNAME = "_googlecast._tcp.local"
def EndsWithEmpty(x, lst, ctx):
return not x
class QNameValidator(Validator):
def _validate(self, obj, context, path):
return obj[-1] == ""
class QNameArrayAdapter(Adapter):
def _decode(self, obj, context, path):
return ".".join(map(str, obj[:-1]))
def _encode(self, obj, context, path):
return list(map(str, obj.split("."))) + [""]
@dataclass
class AnswerPtr:
offset: int
class AnswerPart(Subconstruct):
def __init__(self):
self.name = "AnswerPart"
self.subcon = PascalString
self.flagbuildnone = False
self.parsed = None
def _parse(self, stream, context, path):
# read from the stream
# return object
len = stream.read(1)[0]
if (len & 0xC0) == 0xC0:
l2 = stream.read(1)[0]
return AnswerPtr(((len & 0x3F) << 8) | l2)
else:
return stream.read(len)
def _build(self, obj, stream, context, path):
# write obj to the stream
# return same value (obj) or a modified value
# that will replace the context dictionary entry
raise Error("Answer part build not yet implemented")
def _sizeof(self, context, path):
# return computed size (when fixed size or depends on context)
# or raise SizeofError (when variable size or unknown)
raise SizeofError("Answer part has avariable size")
def EndsWithEmptyOrPointer(x, lst, ctx):
return (not x) or isinstance(x, AnswerPtr)
class IpAddressAdapter(Adapter):
def _decode(self, obj, context, path):
return ".".join(map(str, obj))
def _encode(self, obj, context, path):
return list(map(int, obj.split(".")))
IpAddress = IpAddressAdapter(Byte[4])
HEX = HexDump(GreedyBytes)
QNAME = QNameArrayAdapter(
QNameValidator(RepeatUntil(EndsWithEmpty, PascalString(Byte, "utf8"))))
DNSAnswer = Struct(
"NAME" / RepeatUntil(EndsWithEmptyOrPointer, AnswerPart()),
"TYPE" / Enum(
Int16ub,
A=1,
NS=2,
CNAME=5,
SOA=6,
WKS=11,
PTR=12,
MX=15,
TXT=16,
AAAA=28,
SRV=33,
),
"CLASS" / BitStruct(
"FlushCache" / Flag,
"CLASS" / Enum(BitsInteger(15), IN=1),
),
"TTL" / Int32ub,
"RDATA" / Prefixed(
Int16ub,
Switch(
this.TYPE, {
"TXT": GreedyRange(PascalString(Byte, "utf8")),
"A": IpAddress,
"AAAA": Array(16, Byte),
"PTR": RepeatUntil(EndsWithEmptyOrPointer, AnswerPart()),
},
default=GreedyBytes)),
)
DNSQuery = Struct(
"ID" / Int16ub,
"Control" / BitStruct(
"QR" / Default(Flag, False),
"OpCode" /
Default(Enum(BitsInteger(4), QUERY=0, IQUERY=1, STATUS=2), "QUERY"),
"AA" / Default(Flag, False),
"TC" / Default(Flag, False),
"RD" / Default(Flag, False),
"RA" / Default(Flag, False),
"Z" / Padding(1),
"AD" / Default(Flag, False),
"CD" / Default(Flag, False),
"Rcode" / Default(
Enum(
BitsInteger(4),
OK=0,
FORMAT_ERROR=1,
SERVER_FAILURE=2,
NAME_ERROR=3,
NOT_IMPLEMENTED=4,
REFUSED=5,
),
"OK",
),
),
"QuestionCount" / Rebuild(Int16ub, len_(this.Questions)),
"AnswerCount" / Rebuild(Int16ub, len_(this.Answers)),
"AuthorityCount" / Rebuild(Int16ub, len_(this.Authorities)),
"AdditionalCount" / Rebuild(Int16ub, len_(this.Additionals)),
"Questions" / Array(
this.QuestionCount,
Struct(
"QNAME" / QNAME,
"QTYPE" / Default(
Enum(
Int16ub,
A=1,
NS=2,
CNAME=5,
SOA=6,
WKS=11,
PTR=12,
MX=15,
SRV=33,
AAAA=28,
ANY=255,
), "ANY"),
"QCLASS" / BitStruct(
"Unicast" / Default(Flag, False),
"Class" / Default(Enum(BitsInteger(15), IN=1, ANY=255), "IN"),
),
),
),
"Answers" / Default(Array(this.AnswerCount, DNSAnswer), []),
"Authorities" / Default(Array(this.AuthorityCount, DNSAnswer), []),
"Additionals" / Default(Array(this.AdditionalCount, DNSAnswer), []),
)
class EchoClientProtocol:
def __init__(self, on_con_lost):
self.on_con_lost = on_con_lost
self.transport = None
def connection_made(self, transport):
self.transport = transport
query = DNSQuery.build({
"ID": 0x1234,
"Questions": [
{
"QNAME": QUERY_QNAME,
"QCLASS": {
"Unicast": True
}
},
],
"Answers": [],
"Authorities": [],
"Additionals": [],
})
logging.info("Connection made")
logging.info("Sending:\n%s", DNSQuery.parse(query))
logging.info("BINARY:\n%s", HEX.parse(query))
self.transport.sendto(query, DEST_ADDR)
logging.info("Query sent")
def datagram_received(self, data, addr):
logging.info("Received reply from: %r", addr)
logging.debug(HEX.parse(data))
logging.info(DNSQuery.parse(data))
def error_received(self, exc):
logging.error("Error")
def connection_lost(self, exc):
logging.error("Lost connection")
self.on_con_lost.set_result(True)
async def main():
loop = asyncio.get_running_loop()
client_done = loop.create_future()
transport, protocol = await loop.create_datagram_endpoint(
lambda: EchoClientProtocol(client_done),
local_addr=LOCAL_ADDR
)
try:
await client_done
finally:
transport.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="mDNS test app")
parser.add_argument(
"--log-level",
default=logging.INFO,
type=lambda x: getattr(logging, x),
help="Configure the logging level.",
)
args = parser.parse_args()
logging.basicConfig(
level=args.log_level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
coloredlogs.install(level=args.log_level)
asyncio.run(main())
```
|
{
"source": "jepetersohn/haikuna-matata",
"score": 3
}
|
#### File: jepetersohn/haikuna-matata/runner_test.py
```python
import unittest
import runner
class TestRunnerMethods(unittest.TestCase):
def test_count_syllables(self):
syllables = runner.countSyllables('hello')
self.assertEqual(syllables, 2)
def test_is_haiku(self):
trueHaiku = runner.isHaiku("I am a haiku trust me this is a haiku, I'm dead serious")
falseHaiku = runner.isHaiku("I'm a donut")
self.assertTrue(trueHaiku)
self.assertFalse(falseHaiku)
#def test_generate_random_haiku(self):
# haiku = runner.generateHaiku('the')
# self.assertIsInstance(haiku, basestring)
# self.assertEqual(haiku, 'Hello, World')
def test_pick_random_word(self):
word = runner.pickRandomWord(1)
from models import Unigram
queryWord = Unigram.query.filter(Unigram.word1 == word)[0].word1
self.assertIsInstance(word, basestring)
self.assertEqual(word, queryWord)
def test_generate_line(self):
line = runner.generateLine(4, "the")
lineSyllables = runner.countSyllables(line)
self.assertEqual(lineSyllables, 5)
def test_grab_possible_words(self):
from models import Unigram
unigrams = Unigram.query.filter(Unigram.word1 == 'the')
possibleWords = runner.grabPossibleWords("the", 1)
self.assertIsInstance(possibleWords, list)
self.assertEqual(len(possibleWords), 1)
if __name__ == '__main__':
unittest.main()
```
|
{
"source": "jepetolee/CRYPTONALYTICS",
"score": 2
}
|
#### File: CRYPTONALYTICS/TrainingModel/Chad.py
```python
import warnings
import numpy as np
import sys
import PIL
from binance import ThreadedWebsocketManager
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
sys.path.append('..')
from time import sleep
from ValueCrypto import Trader
from binance.client import Client
from datetime import datetime
from make_pd import *
from torchvision import transforms
from TradeAlgorithm import DatasetFinal, TradeDataSetOut, update_future_15min_csv, update_future_1min_csv, \
update_future_1hour_csv
from tqdm import tqdm, trange
import torch.nn as nn
import torch.nn.functional as F
import torch
import gc
import pandas as pd
from torch.distributions import Categorical
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_finance import candlestick2_ohlc
def build_numpy(data, temper):
fig = plt.figure(figsize=(20, 20))
gs = gridspec.GridSpec(2, 4)
axes = list()
axes.append(plt.subplot(gs[0, :]))
axes.append(plt.subplot(gs[1, :], sharex=axes[0]))
candlestick2_ohlc(axes[0], data[0], data[1], data[2], data[3], width=1, colorup='r', colordown='b')
axes[1].bar(data.index + 1, data[4], color='k', width=0.8, align='center')
copies = data[3].copy()
axes[0].plot(data.index + 1, copies.rolling(window=3).mean(), label='Ma3')
axes[0].plot(data.index + 1, copies.rolling(window=14).mean(), label='Ma14')
plt.savefig('D:/CRYPTONALYTICS/TrainingModel/' + temper + '_day.png', dpi=100)
plt.close('all')
X = PIL.Image.open('D:/CRYPTONALYTICS/TrainingModel/' + temper + '_day.png').convert("L")
x = np.array(X)
X.close()
return x
def DayRealChad(symbol, device, leveragen, impulse=-1, saved=False, grad_lock=False): # (XRP,BNB,BTC,ETH)
print("AwesomeChad")
trader = Trader(device).to(device)
client = Client(api_key="", api_secret="")
if saved:
trader.load_state_dict(torch.load('./model/' + symbol + '_trader.pt'))
trans = transforms.Compose([transforms.ToTensor(),
transforms.Resize(size=(500, 500)),
transforms.Normalize(0.5, 0.5)])
# <---------------------------------------------------------------------->
current_time = datetime.now().strftime("%H:%M:%S")
print("Started at " + current_time)
# <---------------------------------------------------------------------->
onehour = client.futures_klines(symbol=symbol, interval='1d', limit=1500)
onehour = pd.DataFrame(np.array(onehour, dtype=np.float)[-45:].T[1:6].T)
onehour = build_numpy(onehour, symbol)
s_oneH = trans(onehour).float().to(device).reshape(-1, 1, 500, 500)
sleep(0.1)
fifteen_data = client.futures_klines(symbol=symbol, interval='15m', limit=1500)
fifteen_data = pd.DataFrame(np.array(fifteen_data, dtype=np.float)[-120:].T[1:6].T)
fifteen_data = build_numpy(fifteen_data, symbol)
s_oneF = trans(fifteen_data).float().to(device).reshape(-1, 1, 500, 500)
oneminute_data = client.futures_klines(symbol=symbol, interval='4h', limit=1500)
oneminute_data = pd.DataFrame(np.array(oneminute_data, dtype=np.float)[-60:].T[1:6].T)
oneminute_data = build_numpy(oneminute_data, symbol)
s_oneM = trans(oneminute_data).float().to(device).reshape(-1, 1, 500, 500)
# <---------------------------------------------------------------------->
hidden = (
torch.zeros([1, 1, 16], dtype=torch.float).to(device), torch.zeros([1, 1, 16], dtype=torch.float).to(device))
h_in = [hidden, hidden, hidden]
benefit = 100
selecter = True
position = False
t = 0
locker, ring = 20, 1
while True:
sleep(1)
# <---------------------------------------------------------------------->
current_price = float(client.futures_symbol_ticker(symbol=symbol, limit=1500)['price'])
if selecter:
with torch.no_grad():
position_t, h_out = trader.SetPosition(s_oneH, s_oneF, s_oneM, h_in)
position_t = position_t.detach().reshape(-1)
position_a = Categorical(position_t).sample()
position_prob = position_t[position_a.item()]
s_oneHP = s_oneH
s_oneMP = s_oneM
s_oneFP = s_oneF
h_inP = h_in
h_outP = h_out
if position_a == 0:
position_v = 'LONG'
selected_price = current_price
selecter = False
elif position_a == 1:
position_v = 'SHORT'
selected_price = current_price
selecter = False
else:
position_v = 'NONE'
selected_price = current_price
selecter = False
print(position_v + ': ', current_price)
# <---------------------------------------------------------------------->
difference = (0.9998 * current_price - selected_price)
if position_v == 'SHORT':
difference = (0.9998 * selected_price - current_price)
if position_v == 'NONE' and difference > 0:
difference *= -1
# <---------------------------------------------------------------------->
percent = leveragen * difference / selected_price * 100
# <---------------------------------------------------------------------->
if percent < impulse:
selecter = True
percent = impulse
reward = -1
ring = 1
if difference > locker * ring:
ring += 1
position = True
if position:
if difference <= locker * (ring - 1) - 10:
difference = locker * (ring - 1) - 10
reward = ring - 1
selecter = True
position = False
ring = 1
elif position_v == 'NONE':
percent = 0
sleep(10800)
reward = 0
ring = 1
selecter = True
if selecter:
if position_v != 'NONE':
sleep(700)
benefit *= (1 + percent / 100)
onehour = client.futures_klines(symbol=symbol, interval='4h', limit=1500)
onehour = pd.DataFrame(np.array(onehour, dtype=np.float)[-45:].T[1:6].T)
onehour = build_numpy(onehour, symbol)
sprime_oneH = trans(onehour).float().to(device).reshape(-1, 1, 500, 500)
oneminute_data = client.futures_klines(symbol=symbol, interval='1h', limit=1500)
oneminute_data = pd.DataFrame(np.array(oneminute_data, dtype=np.float)[-60:].T[1:6].T)
oneminute_data = build_numpy(oneminute_data, symbol)
sprime_oneM = trans(oneminute_data).float().to(device).reshape(-1, 1, 500, 500)
fifteen_data = client.futures_klines(symbol=symbol, interval='15m', limit=1500)
fifteen_data = pd.DataFrame(np.array(fifteen_data, dtype=np.float)[-120:].T[1:6].T)
fifteen_data = build_numpy(fifteen_data, symbol)
sprime_oneF = trans(fifteen_data).float().to(device).reshape(-1, 1, 500, 500)
trader.TrainModelP(s_oneHP, s_oneFP, s_oneMP,
sprime_oneH, sprime_oneF, sprime_oneM, h_inP, h_outP,
position_a, position_prob, reward)
torch.save(trader.state_dict(), './model/' + symbol + '_trader.pt')
print(str(round(benefit, 2)) + "% " + position_v + " reward is " + str(round(percent, 2)),
current_price)
s_oneM = sprime_oneM
s_oneF = sprime_oneF
s_oneH = sprime_oneH
h_in = h_out
if t % 75 == 0:
print(current_price, percent)
t += 1
DayRealChad('BTCUSDT', 'cpu', 125, impulse=-10, saved=True)  # big one 20, small one 5
'''
SOLUSDT
'TRXUSDT',
'AVAXUSDT',
'NEARUSDT',
'USDCUSDT'
'''
```
|
{
"source": "JephDiel/BCI",
"score": 4
}
|
#### File: JephDiel/BCI/main.py
```python
import pygame
from pygame.constants import K_DOLLAR, K_DOWN, K_ESCAPE, K_LEFT, K_RIGHT, K_UP
from pygame.time import Clock
import random
from ThinkGear import ThinkGear
from collections import deque
import numpy as np
my_device = ThinkGear("COM5")
reads_per_data = 256
raw_data = deque(maxlen=reads_per_data) #192
data_size = 180
time_per_data = 1
lastdata = 0
training_data_input = []
training_data_output = []
data_index = 0
def saveData(dir):
global data_index
global training_data_input
global training_data_output
training_data_input += [list(raw_data)]
training_data_output += [[0,0,0,0]]
training_data_output[-1][dir] = 1
data_index += 1
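# Note (added for clarity, not in the original script): saveData stores the last
# reads_per_data raw EEG samples as one training input and a one-hot label where
# index 0/1/2/3 corresponds to the Up/Down/Left/Right arrow key pressed below.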
pygame.init()
# Set up the drawing window
screen = pygame.display.set_mode([500, 500])
font = pygame.font.Font(pygame.font.get_default_font(), 36)
# Run until the user asks to quit
running = True
pos = (250,250)
dir = (0,0)
clock = Clock()
speed = 2
score = 0
foodpos = (250, 100)
while running:
delta = 1 / float(clock.tick(reads_per_data))
# Did the user click the window close button?
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.KEYDOWN and len(raw_data) == reads_per_data:
if event.key == K_UP:
print("Up")
dir = (0, -speed)
saveData(0)
if event.key == K_DOWN:
print("Down")
dir = (0, speed)
saveData(1)
if event.key == K_LEFT:
print("Left")
dir = (-speed, 0)
saveData(2)
if event.key == K_RIGHT:
print("Right")
dir = (speed, 0)
saveData(3)
if event.key == K_ESCAPE:
running = False
# Fill the background with white
screen.fill((50, 50, 50))
if(len(raw_data) == reads_per_data):
pygame.draw.circle(screen, (0, 150, 0), foodpos, 5)
# Draw a solid blue circle in the center
pygame.draw.circle(screen, (200, 200, 0), pos, 10)
x = (pos[0] + (dir[0] * delta)) % 500
y = (pos[1] + (dir[1] * delta)) % 500
pos = (x,y)
text_surface = font.render("Score: " + str(score), True, (250, 250, 250))
screen.blit(text_surface, dest=(10,10))
if ((pos[0]-foodpos[0])**2 + (pos[1] - foodpos[1]) ** 2) < 225:
score += 1
print("Score")
while ((pos[0]-foodpos[0])**2 + (pos[1] - foodpos[1]) ** 2) < 225:
foodpos = (random.randint(10, 490), random.randint(40, 490))
# Flip the display
pygame.display.flip()
my_device.fetch_data()
data = my_device.data
if 'eeg_raw' in data:
raw_data.append(data['eeg_raw'])
lastdata = data['eeg_raw']
else:
raw_data.append(lastdata)
# Done! Time to quit.
pygame.quit()
my_device.close()
training_data_input = np.array(training_data_input)
training_data_output = np.array(training_data_output)
print("Adding " + str(training_data_input.shape[0]) + " Items with " + str(training_data_input.shape[1]) + " elements each")
try:
old_training_input = np.load("trainingdata/input3.npy")
old_training_output = np.load("trainingdata/output3.npy")
training_data_input = np.concatenate((training_data_input, old_training_input))
training_data_output = np.concatenate((training_data_output, old_training_output))
except:
print("Old Data Not Found, Overriding")
np.save("trainingdata/input3.npy", training_data_input)
np.save("trainingdata/output3.npy", training_data_output)
print("Total " + str(training_data_input.shape[0]) + " Items with " + str(training_data_input.shape[1]) + " elements each")
```
#### File: JephDiel/BCI/ThinkGear.py
```python
import serial, math#, pygame
codes = [0x02, 0x03, 0x04, 0x05, 0x06, 0x80, 0x83 ]
names = ["quality","heartrate","attention","meditation","8bit_raw","eeg_raw","eeg_asic"]
c_len = [1, 1, 1, 1, 1, 3, 25 ]
bands = ["delta","theta","low-alpha","high-alpha","low-beta","high-beta","low-gamma","mid-gamma"]
#convert signed bit/byte array to int
def signed_thing_to_int(b, length):
return b-((b >> (length-1)) & 1)*2**length #return b if first bit is 0, otherwise subtract max value representable with given number of bits and return
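# Worked examples (added for clarity): with length=16 the helper above performs a
# two's-complement decode of a raw EEG word, e.g.
#   signed_thing_to_int(0x0001, 16) -> 1
#   signed_thing_to_int(0x8000, 16) -> -32768
#   signed_thing_to_int(0xFFFF, 16) -> -1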
'''EEG Device Class'''
class ThinkGear(object):
def __init__(self, port, baudrate=57600):
self.ser = serial.Serial(port, baudrate) #initialize serial communication/connection
self.data = {}
def fetch_data(self):
self.data = {} #reset values
while True:
self.ser.read_until(b"\xAA\xAA") #wait for sync bytes
plength = ord(self.ser.read(1)) #payload length
payload = self.ser.read(plength) #read entire payload of given length
checksum = ~(int(math.fsum([b for b in payload])) & 0xFF) & 0xFF #calculate checksum by doing... checksum-calculation stuff (described in the docs)
if checksum == ord(self.ser.read(1)): break #checksums match, move on
else: print("ERROR: Checksum mismatch!") #checksum mismatch, repeat
i = 0
while i < len(payload)-1:
code = payload[i]
if code in codes: #check if current byte is a supported code
c = codes.index(code) #find corresponding index in the three code-related lists above
'''old code which I prefer (because it's technically one line) (sadly without a way to add comments)
self.data[names[c]] = payload[i+1] if c < 5 \
else signed_thing_to_int(payload[i+2] << 8 | payload[i+3], 16) if c == 5 \
else dict(zip(bands, [payload[b] << 16 | payload[b+1] << 8 | payload[b+2] for b in range(i+1, i+25, 3)]))
'''
if c < 5: #all single-byte codes (quality, heartrate, attention, meditation, 8bit_raw)
self.data[names[c]] = payload[i+1]
elif c == 5: #eeg_raw (fun fact: the first byte after the code is completely useless)
self.data[names[c]] = signed_thing_to_int(payload[i+2] << 8 | payload[i+3], 16)
elif c == 6: #eeg_asic
self.data[names[c]] = dict(zip(bands, [payload[b] << 16 | payload[b+1] << 8 | payload[b+2] for b in range(i+1, i+25, 3)]))
i += c_len[c] #add code-specific number of bytes to i
i += 1 #add 1 each time to avoid getting stuck on unused bytes
def close(self):
self.ser.close()
# print("Connecting Thinkgear")
# eeg_device = ThinkGear("COM5", 9600)
# print ("Connected")
# '''Visualization Stuff'''
# vis_points = 640 #number of eeg readings to be plotted at once
# size = (640, 480) #window size in pixels
# x_vals = [int(size[0]*(x+0.5)/vis_points) for x in range(vis_points)]
# y_vals = [int(size[1]/2) for x in range(vis_points)]
# surface = pygame.display.set_mode(size) #initialize window
# pygame.display.set_caption("EEG Visualizer") #...
# raw_eeg_range = 8192 #technically 2*32768=65536 (2^16), but for some reason it doesn't use the full available range
# clock = pygame.time.Clock()
# while True:
# try:
# clock.tick(30)
# print("Fetching")
# eeg_device.fetch_data()
# print("Fetched")
# if len(eeg_device.data) == 1: y_vals = y_vals[1:]+[int(size[1]/2-size[1]*eeg_device.data["eeg_raw"]/raw_eeg_range)]
# surface.fill((0,0,0)) #Do I really need to explain this?
# points = list(zip(x_vals, y_vals)) #zip x and y values to pairs (list(zip([x0,x1,...xN], [y0,y1,...,yN])) = [(x0,y0),(x1,y1),...,(xN,yN)])
# pygame.draw.lines(surface, (255,255,255), False, points) #draw continuous line segments through points
# pygame.display.flip() #display changes
# except KeyboardInterrupt: #I don't even know if this works, heck. <insert wrinkly Pikachu>
# pygame.quit()
```
|
{
"source": "jephstahl/mpf-docs",
"score": 2
}
|
#### File: scriptlets/scriptlets/test_scriptlet.py
```python
from mpf.core.scriptlet import Scriptlet
class TestScriptlet(Scriptlet):
def on_load(self):
self.log.debug("Loaded!")
self.machine.events.add_handler('test_event', self._update)
def _update(self, **kwargs):
del kwargs
self.machine.events.post("test_response")
```
|
{
"source": "jepierre/python_apps",
"score": 3
}
|
#### File: python_apps/aiosync_ex/asyncio_ex.py
```python
import logging
import os
import asyncio
# set up logger
import time
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
# Paths
PATH = os.path.dirname(__file__)
def main(*args):
# Turns on logging to console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - <%(funcName)s:%(lineno)s> - %(message)s",
"%Y-%m-%d %H:%M:%S",
)
ch.setFormatter(formatter)
logger.addHandler(ch)
async def print_x(name, number, seconds):
for i in range(number):
logger.debug("Task {} at number: {}".format(name, i))
await asyncio.sleep(seconds)
start = time.time()
my_event_loop = asyncio.get_event_loop()
tasks = [
asyncio.ensure_future(print_x("A", number=10, seconds=2)),
asyncio.ensure_future(print_x("B", number=15, seconds=1)),
]
my_event_loop.run_until_complete(asyncio.wait(tasks))
my_event_loop.close()
end = time.time()
logger.debug("total time: {}".format(end - start))
if __name__ == "__main__":
main()
```
#### File: python_apps/PyMonthlyExpense/plain_text_edit_logger.py
```python
import logging
from PyQt5 import uic, Qt, QtCore
from PyQt5.QtGui import QTextCursor
from PyQt5.QtWidgets import (
QPlainTextEdit,
QWidget,
QApplication,
QMainWindow,
QPushButton,
QVBoxLayout,
QDockWidget,
QTextEdit,
QGroupBox,
)
import sys
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter(
"%(asctime)s: %(name)s - %(levelname)s - <%(module)s:%(funcName)s:%(lineno)d> - %(message)s"
)
class QPlainTextEditLogger(logging.Handler):
def __init__(self, text_box_handle):
logging.Handler.__init__(self)
self.text_edit_logger = text_box_handle
self.text_edit_logger.setReadOnly(True)
def emit(self, record):
msg = self.format(record)
self.text_edit_logger.append(msg)
self.text_edit_logger.moveCursor(QTextCursor.End)
def write(self, m):
pass
class CustomLoggerWidget(QDockWidget):
def __init__(self, formatter=FORMATTER):
super().__init__()
self.setWindowTitle("Log")
self.formatter = formatter
self.init_ui()
def init_ui(self):
text_box_handle = QTextEdit()
text_box_handle.document().setDefaultStyleSheet(
".CRITICAL{color:red; font-weight:bold} .ERROR{color:#CC0000; weight:bold} "
".WARNING{color:#CCAA33} .INFO{color:black} .DEBUG{color:green} .VERBOSE{color:blue} "
".TIME{color:black} .MODULE{color:purple} .FUNCTION {color:gray; font-weight:bold}"
)
text_box_handle.append("<style>.DEBUG {color:red} .INFO {color:blue} </style>")
formatter = logging.Formatter(
fmt="<span class=TIME>%(asctime)s </span>"
+ "<span class=MODULE><%(module)s> </span>"
+ "<span class=FUNCTION><%(funcName)s:%(lineno)s></span>"
+ "<span class=%(levelname)s> %(levelname)s - %(message)s</span>"
)
# text_box_handle.document().setDefaultStyleSheet('.CRITICAL{color:red; font-weight:bold}'
# '.ERROR{color:#cc000; weight:bold}'
# '.WARNING{color:#CCAA33} .INFO{color:black}')
# text_box_handle.append('<style>.DEBUG {color:red} .INFO{color:blue} </style>')
# formatter = logging.Formatter(fmt='<span class=TIME>%(asctime)s </span>' +
# '<span class=MODULE><%(module)s> </span>')
log_text_box = QPlainTextEditLogger(text_box_handle)
# set format
log_text_box.setFormatter(formatter)
logging.getLogger().addHandler(log_text_box)
# set logging level
logging.getLogger().setLevel(logging.DEBUG)
# vboxformat.addWidget(log_text_box.text_edit_logger)
# vboxformat.addWidget(text_box_handle)
self.setWidget(text_box_handle)
# self.dock_log.setWidget(text_box_handle)
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
self.init_ui()
def init_ui(self):
self.setWindowTitle("Logger Example")
self.setGeometry(200, 200, 640, 480)
self._button = QPushButton()
self._button.setText("Test Me")
group_box = QGroupBox()
layout = QVBoxLayout()
layout.addWidget(self._button)
te = QTextEdit()
logg = QPlainTextEditLogger(te)
clogger = CustomLoggerWidget(self)
# layout.addWidget(clogger)
# layout.addWidget(logg.text_edit_logger)
self._button.clicked.connect(self.test)
group_box.setLayout(layout)
# self.setCentralWidget(QTextEdit())
self.layout().addWidget(group_box)
self.addDockWidget(QtCore.Qt.BottomDockWidgetArea, clogger)
# self.layout().addWidget(self._button)
self.show()
def test(self):
logging.debug("damn, a bug")
logging.info("something to remember")
logging.warning("that's not right")
logging.error("foobar")
def main():
# Enable logging on the console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(FORMATTER)
logger.addHandler(ch)
# Opens the app
app = QApplication(sys.argv)
App = MainWindow()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
```
#### File: python_apps/PyNotePad/Main.py
```python
__appname__ = "PyNotePad"
import logging
import os
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.Qt import QFontDatabase
import termcolor
import traceback
app_path = os.path.dirname(__file__)
__path__ = app_path
app_log_path = os.path.join(app_path, "logs")
if not os.path.exists(app_log_path):
os.makedirs(app_log_path)
log_file_name = __appname__ + ".txt"
formatter = "%(asctime)s: %(name)s -%(levelname)s -%(module)s -%(funcName)s -%(lineno)-3d -%(message)s"
logging.basicConfig(
filename=os.path.join(app_log_path, log_file_name), format=formatter
)
logger = logging.getLogger(name="main-gui")
logger.setLevel(logging.DEBUG)
class Main(QMainWindow):
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
uic.loadUi(r"ui_files/main.ui", self)
logger.debug("loading main.ui")
self.setWindowTitle(__appname__)
self.init_ui()
self.show()
def init_ui(self):
fixed_font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
fixed_font.setPointSize(12)
self.main_text_edit.setFont(fixed_font)
self.action_exit.triggered.connect(self.exit_app)
def exit_app(self):
sys.exit(0)
def save_file(self):
pass
def save_file_as(self):
pass
def open_file(self):
pass
def main():
# make sure were in the current path of the Main file
os.chdir(os.path.dirname(__file__))
# Enable logging on the console
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter(formatter))
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
# catches errors in gui and print them
def excepthook(etype, value, tb):
if isinstance(value, KeyboardInterrupt):
sys.exit(1)
else:
termcolor.cprint("Sorry, something's wrong! ", "yellow", file=sys.stderr)
# print traceback
traceback.print_exception(etype, value, tb)
# Set global exception handler.
sys.excepthook = excepthook
# Open the app
app = QApplication(sys.argv)
App = Main(parent=None)
sys.exit(app.exec_())
if __name__ == "__main__":
main()
```
#### File: python_apps/TDD/mycode.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def hello_world():
return "hello world"
def create_num_list(length):
return [x for x in range(length)]
def custom_func_x(x, const, power):
return const * x ** power
def custom_non_lin_num_list(length, const, power):
return [custom_func_x(x, const, power) for x in range(length)]
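# Worked example (added for clarity): custom_non_lin_num_list(4, 2, 3) evaluates
# 2 * x**3 for x in 0..3 and returns [0, 2, 16, 54].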
def main():
pass
if __name__ == "__main__":
main()
```
#### File: python_apps/Threading/threading_ex3.py
```python
from PyQt5 import uic
from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QSplashScreen, QMainWindow
from PyQt5.QtCore import (
QTime,
QTimer,
Qt,
QThread,
pyqtSignal,
QObject,
QRunnable,
QThreadPool,
)
from PyQt5 import uic
import sys
import time
import logging
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
# Note that a QRunnable isn't a subclass of QObject and therefore does
# not provide signals and slots.
# this keeps running even after app quits
class RunableThread(QRunnable):
# Note: QRunnable is not a QObject, so it cannot define or emit pyqtSignals itself.
def __init__(self, line_edit):
super(RunableThread, self).__init__()
self.threadLineEdit = line_edit
def run(self):
logger.debug("printing from thread 3")
loop = 1000
for x in range(loop):
logger.debug(f"thread 3: in loop: {x}")
time.sleep(1)
self.threadLineEdit.setText(f"thread counter: {x}")
logger.debug("thread 3 finished")
class WorkerThread(QThread):
threadLineEdit = None
thread_counter = 0
def __init__(self, line_edit):
super(WorkerThread, self).__init__()
self.threadLineEdit = line_edit
def run(self):
logger.debug("printing from thread 1")
loop = 1000
for x in range(loop):
logger.debug(f"in loop: {x}")
if self.thread_counter > loop:
self.thread_counter = 0
else:
time.sleep(1)
self.thread_counter += 1
self.threadLineEdit.setText(f"thread counter: {self.thread_counter}")
class ObjectToThread(QObject):
finished = pyqtSignal()
def __init__(self, line_edit):
super(ObjectToThread, self).__init__()
self.thread2LineEdit = line_edit
def long_running_process(self):
logger.debug("printing from thread 2")
loop = 1000
for x in range(loop):
logger.debug(f"thread 2: in loop: {x}")
time.sleep(1)
self.thread2LineEdit.setText(f"thread counter: {x}")
self.finished.emit()
class Main(QMainWindow):
class TempClass(QThread):
def __init__(self):
QThread.__init__(self)
def run(self):
for x in range(1000):
logger.debug(f"in temp class: {x}")
time.sleep(1)
tmp_clas = TempClass()
INTERVAL = 1000
count = 0
thread_counter = 0
count1 = 0
finished = pyqtSignal(bool)
my_thread = QThread()
def __init__(self):
super().__init__()
uic.loadUi(r"main.ui", self)
self.init_ui()
def init_ui(self):
self.temp_thread = self.TempClass()
self.temp_thread.finished.connect(self.print_all_done)
self.temp_thread.start()
self.print_msg()
self.use_qthread()
self.use_runnable()
self.use_move_to_thread()
self.show()
def use_runnable(self):
myrunnable = RunableThread(self.thread3LineEdit)
QThreadPool.globalInstance().start(myrunnable)
def use_qthread(self):
self.message_thread = WorkerThread(self.threadLineEdit)
self.message_thread.finished.connect(self.print_all_done)
self.message_thread.start()
def use_move_to_thread(self):
# in order for this to work:
# 1. you can't put main application window in a thread
# 2. my thread has to be defined globally
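# (Added note, hedged): point 2 above matters because a QThread held only in a
# local variable can be garbage-collected when this method returns, tearing the
# thread down; keeping my_thread at class/module scope keeps it alive.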
self.obj = ObjectToThread(self.thread2LineEdit)
self.obj.moveToThread(self.my_thread)
self.obj.finished.connect(self.my_thread.quit)
self.my_thread.started.connect(self.obj.long_running_process)
self.my_thread.finished.connect(self.print_all_done)
self.my_thread.start()
def print_all_done(self):
logger.debug("all done")
# Timer with single shot
def print_msg(self):
self.count1 %= 1000
self.count1 += 1
self.printLineEdit.setText(f"print counter: {self.count1}")
QTimer.singleShot(self.INTERVAL, self.print_msg)
def main():
# Enable logging on the console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
sys._excepthook = sys.excepthook
def exception_hook(exctype, value, traceback):
print(exctype, value, traceback)
sys._excepthook(exctype, value, traceback)
sys.exit(1)
sys.excepthook = exception_hook
# Opens the app
app = QApplication(sys.argv)
App = Main()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
```
|
{
"source": "jepler/adabot",
"score": 2
}
|
#### File: adabot/adabot/circuitpython_libraries.py
```python
import argparse
import datetime
import inspect
import logging
import re
import sys
import traceback
from adabot import github_requests as github
from adabot import pypi_requests as pypi
from adabot.lib import circuitpython_library_validators as cirpy_lib_vals
from adabot.lib import common_funcs
from adabot.lib import assign_hacktober_label as hacktober
from adabot.lib import blinka_funcs
from adabot import circuitpython_library_download_stats as dl_stats
logger = logging.getLogger(__name__)
ch = logging.StreamHandler(stream=sys.stdout)
logging.basicConfig(level=logging.INFO, format="%(message)s", handlers=[ch])
# Setup ArgumentParser
cmd_line_parser = argparse.ArgumentParser(
description="Adabot utility for CircuitPython Libraries.",
prog="Adabot CircuitPython Libraries Utility",
)
cmd_line_parser.add_argument(
"-o",
"--output_file",
help="Output log to the filename provided.",
metavar="<OUTPUT FILENAME>",
dest="output_file",
)
cmd_line_parser.add_argument(
"-p",
"--print",
help="Set the level of verbosity printed to the command prompt."
" Zero is off; One is on (default).",
type=int,
default=1,
dest="verbose",
choices=[0, 1],
)
cmd_line_parser.add_argument(
"-e",
"--error_depth",
help="Set the threshold for outputting an error list. Default is 5.",
dest="error_depth",
type=int,
default=5,
metavar="n",
)
cmd_line_parser.add_argument(
"-v",
"--validator",
help="Run validators with 'all', or only the validator(s) supplied in a string.",
dest="validator",
metavar='all OR "validator1, validator2, ..."',
)
# Functions to run on repositories to validate their state. By convention these
# return a list of string errors for the specified repository (a dictionary
# of Github API repository object state).
default_validators = [
vals
for vals in inspect.getmembers(cirpy_lib_vals.LibraryValidator)
if vals[0].startswith("validate")
]
pr_sort_re = re.compile(r"(?<=\(Open\s)(.+)(?=\sdays)")
close_pr_sort_re = re.compile(r"(?<=\(Days\sopen:\s)(.+)(?=\))")
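# Example (added for clarity, strings are illustrative): these patterns pull the
# day counts out of the insight strings built elsewhere, e.g.
#   pr_sort_re.search("... (Open 12 days)").group(1) -> "12"
#   close_pr_sort_re.search("... (Days open: 3)").group(1) -> "3"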
blinka_repos = [
"Adafruit_Blinka",
"Adafruit_Blinka_bleio",
"Adafruit_Blinka_Displayio",
"Adafruit_Python_PlatformDetect",
"Adafruit_Python_PureIO",
"Adafruit_Blinka_PyPortal",
"Adafruit_Python_Extended_Bus",
]
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def run_library_checks(validators, kw_args, error_depth):
"""runs the various library checking functions"""
# Load the latest pylint version
latest_pylint = "2.0.1"
pylint_info = pypi.get("/pypi/pylint/json")
if pylint_info and pylint_info.ok:
latest_pylint = pylint_info.json()["info"]["version"]
logger.info("Latest pylint is: %s", latest_pylint)
repos = common_funcs.list_repos(
include_repos=tuple(blinka_repos)
+ ("CircuitPython_Community_Bundle", "cookiecutter-adafruit-circuitpython")
)
logger.info("Found %s repos to check.", len(repos))
bundle_submodules = common_funcs.get_bundle_submodules()
logger.info("Found %s submodules in the bundle.", len(bundle_submodules))
github_user = common_funcs.whois_github_user()
logger.info("Running GitHub checks as %s", github_user)
need_work = 0
lib_insights = common_funcs.InsightData()
blinka_insights = common_funcs.InsightData()
core_insights = common_funcs.InsightData()
core_insights["milestones"] = dict()
repo_needs_work = []
since = datetime.datetime.now() - datetime.timedelta(days=7)
repos_by_error = {}
new_libs = {}
updated_libs = {}
validator = cirpy_lib_vals.LibraryValidator(
validators, bundle_submodules, latest_pylint, **kw_args
)
for repo in repos:
if len(validators) != 0:
errors = validator.run_repo_validation(repo)
if errors:
need_work += 1
repo_needs_work.append(repo)
# print(repo["full_name"])
# print("\n".join(errors))
# print()
for error in errors:
if not isinstance(error, tuple):
# check for an error occurring in the validator module
if error == cirpy_lib_vals.ERROR_OUTPUT_HANDLER:
# print(errors, "repo output handler error:", validator.output_file_data)
logger.info(", ".join(validator.output_file_data))
validator.output_file_data.clear()
if error not in repos_by_error:
repos_by_error[error] = []
repos_by_error[error].append(repo["html_url"])
else:
if error[0] not in repos_by_error:
repos_by_error[error[0]] = []
repos_by_error[error[0]].append(
"{0} ({1} days)".format(repo["html_url"], error[1])
)
insights = lib_insights
if repo["owner"]["login"] == "adafruit":
if repo["name"] in blinka_repos:
insights = blinka_insights
elif repo["name"] == "circuitpython":
insights = core_insights
closed_metric = bool(insights == lib_insights)
errors = validator.gather_insights(
repo, insights, since, show_closed_metric=closed_metric
)
if errors:
print("insights error")
for error in errors:
if error == cirpy_lib_vals.ERROR_OUTPUT_HANDLER:
logger.info(", ".join(validator.output_file_data))
validator.output_file_data.clear()
# get a list of new & updated libraries for the last week
if repo["name"] != "Adafruit_CircuitPython_Bundle":
check_releases = common_funcs.is_new_or_updated(repo)
if check_releases == "new":
new_libs[repo["name"]] = repo["html_url"]
elif check_releases == "updated":
updated_libs[repo["name"]] = repo["html_url"]
logger.info("")
logger.info("State of CircuitPython + Libraries + Blinka")
logger.info("### Overall")
print_pr_overview(lib_insights, core_insights, blinka_insights)
print_issue_overview(lib_insights, core_insights, blinka_insights)
logger.info("")
logger.info("### Core")
print_pr_overview(core_insights)
logger.info("* %s open pull requests", len(core_insights["open_prs"]))
sorted_prs = sorted(
core_insights["open_prs"],
key=lambda days: int(pr_sort_re.search(days).group(1)),
reverse=True,
)
for pull_request in sorted_prs:
logger.info(" * %s", pull_request)
print_issue_overview(core_insights)
logger.info("* %s open issues", len(core_insights["open_issues"]))
logger.info(" * https://github.com/adafruit/circuitpython/issues")
logger.info("* %s active milestones", len(core_insights["milestones"]))
ms_count = 0
for milestone in sorted(core_insights["milestones"].keys()):
ms_count += core_insights["milestones"][milestone]
logger.info(
" * %s: %s open issues", milestone, core_insights["milestones"][milestone]
)
logger.info(
" * %s issues not assigned a milestone",
len(core_insights["open_issues"]) - ms_count,
)
logger.info("")
## temporarily disabling core download stats:
# - GitHub API has been broken, due to the number of release artifacts
# - Release asset delivery is being moved to AWS CloudFront/S3
# print_circuitpython_dl_stats()
logger.info("* Core download stats available at https://circuitpython.org/stats")
logger.info("")
logger.info("### Libraries")
print_pr_overview(lib_insights)
logger.info(" * Merged pull requests:")
sorted_prs = sorted(
lib_insights["merged_prs"],
key=lambda days: int(close_pr_sort_re.search(days).group(1)),
reverse=True,
)
for pull_request in sorted_prs:
logger.info(" * %s", pull_request)
open_pr_days = [
int(pr_sort_re.search(pull_request).group(1))
for pull_request in lib_insights["open_prs"]
if pr_sort_re.search(pull_request) is not None
]
if len(lib_insights["open_prs"]) != 0:
logger.info(
" * %s open pull requests (Oldest: %s, Newest: %s)",
len(lib_insights["open_prs"]),
max(open_pr_days),
max((min(open_pr_days), 1)), # ensure the minimum is '1'
)
print_issue_overview(lib_insights)
logger.info(" * %s open issues", len(lib_insights["open_issues"]))
logger.info(" * %s good first issues", lib_insights["good_first_issues"])
logger.info("* https://circuitpython.org/contributing")
logger.info("Library updates in the last seven days:")
if len(new_libs) != 0:
logger.info("**New Libraries**")
for title, link in new_libs.items():
logger.info(" * [%s](%s)", title, link)
if len(updated_libs) != 0:
logger.info("**Updated Libraries**")
for title, link in updated_libs.items():
logger.info(" * [%s](%s)", title, link)
if len(validators) != 0:
lib_repos = []
for repo in repos:
if repo["owner"]["login"] == "adafruit" and repo["name"].startswith(
"Adafruit_CircuitPython"
):
lib_repos.append(repo)
logger.info("%s out of %s repos need work.", need_work, len(lib_repos))
list_repos_for_errors = [cirpy_lib_vals.ERROR_NOT_IN_BUNDLE]
logger.info("")
for error in sorted(repos_by_error):
if not repos_by_error[error]:
continue
logger.info("")
error_count = len(repos_by_error[error])
logger.info("%s - %s", error, error_count)
if error_count <= error_depth or error in list_repos_for_errors:
logger.info(
"%s", "\n".join([" * " + x for x in repos_by_error[error]])
)
logger.info("")
logger.info("### Blinka")
print_pr_overview(blinka_insights)
logger.info("* %s open pull requests", len(blinka_insights["open_prs"]))
sorted_prs = sorted(
blinka_insights["open_prs"],
key=lambda days: int(pr_sort_re.search(days).group(1)),
reverse=True,
)
for pull_request in sorted_prs:
logger.info(" * %s", pull_request)
print_issue_overview(blinka_insights)
logger.info("* %s open issues", len(blinka_insights["open_issues"]))
logger.info(" * https://github.com/adafruit/Adafruit_Blinka/issues")
blinka_dl = dl_stats.piwheels_stats().get("adafruit-blinka", {}).get("month", "N/A")
logger.info("* Piwheels Downloads in the last month: %s", blinka_dl)
logger.info("Number of supported boards: %s", blinka_funcs.board_count())
# pylint: disable=too-many-branches,too-many-statements
def print_circuitpython_dl_stats():
"""Gather and report analytics on the main CircuitPython repository."""
# TODO: with the move of release assets to AWS CloudFront/S3, update
# this to use AWS CloudWatch metrics to gather download stats.
# AWS' Python SDK `boto3` has CloudWatch interfaces which should
# enable this.
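    # A minimal sketch of that CloudWatch approach (hedged: the distribution ID
    # below is a placeholder, and the namespace/metric names are the standard
    # CloudFront metrics, untested against the actual CircuitPython setup):
    #
    #   import boto3
    #   cloudwatch = boto3.client("cloudwatch", region_name="us-east-1")
    #   downloads = cloudwatch.get_metric_statistics(
    #       Namespace="AWS/CloudFront",
    #       MetricName="Requests",
    #       Dimensions=[
    #           {"Name": "DistributionId", "Value": "<DISTRIBUTION_ID>"},
    #           {"Name": "Region", "Value": "Global"},
    #       ],
    #       StartTime=start_datetime,
    #       EndTime=end_datetime,
    #       Period=86400,
    #       Statistics=["Sum"],
    #   )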
try:
response = github.get("/repos/adafruit/circuitpython/releases")
except (ValueError, RuntimeError):
logger.info("Core CircuitPython GitHub download statistics request failed.")
return
if not response.ok:
logger.info("Core CircuitPython GitHub download statistics request failed.")
return
releases = response.json()
found_unstable = False
found_stable = False
stable_tag = None
prerelease_tag = None
by_board = {}
by_language = {}
by_both = {}
total = {}
asset_re = re.compile(
r"""
circuitpython\- # end of the prefix
(?P<board>.+)\- # board name
(?P<lang>.+)\- # language
(\d\.\d\.\d.*) # version
\.(?=uf2|bin|hex) # file extension
""",
re.I | re.X,
)
for release in releases:
if not found_unstable and not release["draft"] and release["prerelease"]:
found_unstable = True
prerelease_tag = release["tag_name"]
elif not found_stable and not release["draft"] and not release["prerelease"]:
found_stable = True
stable_tag = release["tag_name"]
else:
continue
for asset in release["assets"]:
if not asset["name"].startswith("adafruit-circuitpython"):
continue
count = asset["download_count"]
info_re = asset_re.search(asset["name"])
if not info_re:
print("Skipping stats for '{}'".format(asset["name"]))
continue
board = info_re.group("board")
language = info_re.group("lang")
if language not in by_language:
by_language[language] = {release["tag_name"]: 0}
if release["tag_name"] not in by_language[language]:
by_language[language][release["tag_name"]] = 0
by_language[language][release["tag_name"]] += count
if board not in by_board:
by_board[board] = {release["tag_name"]: 0}
by_both[board] = {}
if release["tag_name"] not in by_board[board]:
by_board[board][release["tag_name"]] = 0
by_board[board][release["tag_name"]] += count
by_both[board][language] = count
if release["tag_name"] not in total:
total[release["tag_name"]] = 0
total[release["tag_name"]] += count
logger.info("Number of supported boards: %s", len(by_board))
logger.info("")
logger.info("Download stats by board:")
logger.info("")
by_board_list = [
[
"Board",
"{}".format(stable_tag.strip(" ")),
"{}".format(prerelease_tag.strip(" ")),
],
]
for board in sorted(by_board.items()):
by_board_list.append(
[
str(board[0]),
(str(board[1][stable_tag]) if stable_tag in board[1] else "-"),
(str(board[1][prerelease_tag]) if prerelease_tag in board[1] else "-"),
]
)
long_col = [
(max([len(str(row[i])) for row in by_board_list]) + 3)
for i in range(len(by_board_list[0]))
]
# row_format = "".join(["{:<" + str(this_col) + "}" for this_col in long_col])
row_format = "".join(
[
"| {:<" + str(long_col[0]) + "}",
"|{:^" + str(long_col[1]) + "}",
"|{:^" + str(long_col[2]) + "}|",
]
)
by_board_list.insert(
1,
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
)
by_board_list.extend(
(
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
[
"{0}{1}".format(" " * (long_col[0] - 6), "Total"),
"{}".format(total[stable_tag]),
"{}".format(total[prerelease_tag]),
],
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
)
)
for row in by_board_list:
logger.info("%s", row_format.format(*row))
logger.info("")
logger.info("Download stats by language:")
logger.info("")
by_lang_list = [
[
"Board",
"{}".format(stable_tag.strip(" ")),
"{}".format(prerelease_tag.strip(" ")),
],
]
for board in sorted(by_language.items()):
by_lang_list.append(
[
str(board[0]),
(str(board[1][stable_tag]) if stable_tag in board[1] else "-"),
(str(board[1][prerelease_tag]) if prerelease_tag in board[1] else "-"),
]
)
long_col = [
(max([len(str(row[i])) for row in by_lang_list]) + 3)
for i in range(len(by_lang_list[0]))
]
# row_format = "".join(["{:<" + str(this_col) + "}" for this_col in long_col])
row_format = "".join(
[
"| {:<" + str(long_col[0]) + "}",
"|{:^" + str(long_col[1]) + "}",
"|{:^" + str(long_col[2]) + "}|",
]
)
by_lang_list.insert(
1,
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
)
by_lang_list.extend(
(
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
[
"{0}{1}".format(" " * (long_col[0] - 6), "Total"),
"{}".format(total[stable_tag]),
"{}".format(total[prerelease_tag]),
],
[
"{}".format("-" * (long_col[0])),
"{}".format("-" * (long_col[1])),
"{}".format("-" * (long_col[2])),
],
)
)
for row in by_lang_list:
logger.info("%s", row_format.format(*row))
# for language in by_language:
# logger.info("* %s - %s", language, by_language[language])
logger.info("")
def print_pr_overview(*insights):
"""Prints an overview of Pull Requests"""
merged_prs = sum([len(x["merged_prs"]) for x in insights])
authors = set().union(*[x["pr_merged_authors"] for x in insights])
reviewers = set().union(*[x["pr_reviewers"] for x in insights])
logger.info("* %s pull requests merged", merged_prs)
logger.info(" * %s authors - %s", len(authors), ", ".join(authors))
logger.info(" * %s reviewers - %s", len(reviewers), ", ".join(reviewers))
def print_issue_overview(*insights):
"""Prints an overview of Issues"""
closed_issues = sum([x["closed_issues"] for x in insights])
issue_closers = set().union(*[x["issue_closers"] for x in insights])
new_issues = sum([x["new_issues"] for x in insights])
issue_authors = set().union(*[x["issue_authors"] for x in insights])
logger.info(
"* %s closed issues by %s people, %s opened by %s people",
closed_issues,
len(issue_closers),
new_issues,
len(issue_authors),
)
# print Hacktoberfest labels changes if its Hacktober
in_season, season_action = hacktober.is_hacktober_season()
if in_season:
hacktober_changes = ""
if season_action == "add":
hacktober_changes = "* Assigned Hacktoberfest label to {} issues.".format(
sum([x["hacktober_assigned"] for x in insights])
)
elif season_action == "remove":
hacktober_changes += "* Removed Hacktoberfest label from {} issues.".format(
sum([x["hacktober_removed"] for x in insights])
)
logger.info(hacktober_changes)
# pylint: disable=too-many-branches
def main(verbose=1, output_file=None, validator=None, error_depth=5):
"""Main"""
validator_kwarg_list = {}
startup_message = [
"Running CircuitPython Library checks...",
"Report Date: {}".format(datetime.datetime.now().strftime("%d %B %Y, %I:%M%p")),
]
if verbose == 0:
logger.setLevel("CRITICAL")
if output_file:
file_handler = logging.FileHandler(output_file)
logger.addHandler(file_handler)
startup_message.append(
" - Report output will be saved to: {}".format(output_file)
)
validators = []
validator_names = []
if validator:
startup_message.append(
" - Depth for listing libraries with errors: {}".format(error_depth)
)
if validator != "all":
validators = []
for func in validator.split(","):
func_name = func.strip()
try:
if not func_name.startswith("validate"):
raise KeyError
# print('{}'.format(func_name))
if "contents" not in func_name:
validators.append(
[
val[1]
for val in default_validators
if func_name in val[0]
][0]
)
else:
validators.insert(
0,
[
val[1]
for val in default_validators
if func_name in val[0]
][0],
)
validator_names.append(func_name)
except KeyError:
# print(default_validators)
logger.info(
"Error: '%s' is not an available validator.\nAvailable validators are: %s",
func.strip(),
", ".join([val[0] for val in default_validators]),
)
sys.exit()
else:
validators = [val_funcs[1] for val_funcs in default_validators]
validator_names = [val_names[0] for val_names in default_validators]
startup_message.append(
" - These validators will run: {}".format(", ".join(validator_names))
)
if "validate_contents" not in validator_names:
validator_kwarg_list["validate_contents_quiet"] = True
validators.insert(
0,
[val[1] for val in default_validators if "validate_contents" in val[0]][
0
],
)
try:
for message in startup_message:
logger.info(message)
logger.info("")
# print(validators)
run_library_checks(
validators,
validator_kwarg_list,
error_depth,
)
except:
_, exc_val, exc_tb = sys.exc_info()
logger.error("Exception Occurred!")
logger.error(("-" * 60))
logger.error("Traceback (most recent call last):")
trace = traceback.format_tb(exc_tb)
for line in trace:
logger.error(line)
logger.error(exc_val)
raise
if __name__ == "__main__":
cli_args = cmd_line_parser.parse_args()
main(
verbose=cli_args.verbose,
output_file=cli_args.output_file,
validator=cli_args.validator,
error_depth=cli_args.error_depth,
)
```
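A minimal sketch of driving this module programmatically instead of through the CLI shim above. The import path is assumed from the repository layout, the output file name is arbitrary, and `validator="all"` simply selects every entry in `default_validators`:
```python
# Hypothetical direct invocation of the reporter defined above.
from adabot import circuitpython_libraries  # assumed module path

circuitpython_libraries.main(
    verbose=1,
    output_file="adabot_report.txt",  # or None to log to stdout only
    validator="all",
    error_depth=5,
)
```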
#### File: adabot/adabot/circuitpython_library_patches.py
```python
import argparse
import os
import shutil
import sys
import requests
import sh
from sh.contrib import git
from adabot.lib import common_funcs
working_directory = os.path.abspath(os.getcwd())
lib_directory = working_directory + "/.libraries/"
patch_directory = working_directory + "/patches/"
repos = []
check_errors = []
apply_errors = []
stats = []
"""
Set up the command line argument parsing object.
"""
cli_parser = argparse.ArgumentParser(
description="Apply patches to any common file(s) in"
" all Adafruit CircuitPython Libraries."
)
cli_parser.add_argument(
"-l", "--list", help="Lists the available patches to run.", action="store_true"
)
cli_parser.add_argument(
"-p",
help="Runs only the single patch referenced.",
metavar="<PATCH FILENAME>",
dest="patch",
)
cli_parser.add_argument(
"-f",
help="Adds the referenced FLAGS to the git.am call."
" Only available when using '-p'. Enclose flags in brackets '[]'."
" Multiple flags can be passed. NOTE: '--signoff' is already used "
" used by default, and will be ignored. EXAMPLE: -f [-C0] -f [-s]",
metavar="FLAGS",
action="append",
dest="flags",
type=str,
)
cli_parser.add_argument(
"--use-apply",
help="Forces use of 'git apply' instead of 'git am'."
" This is necessary when needing to use 'apply' flags not available"
" to 'am' (e.g. '--unidiff-zero'). Only available when using '-p'.",
action="store_true",
dest="use_apply",
)
cli_parser.add_argument(
"--dry-run",
help="Accomplishes a dry run of patches, without applying" " them.",
action="store_true",
dest="dry_run",
)
cli_parser.add_argument(
"--local",
help="Force use of local patches. This skips verification"
" of patch files in the adabot GitHub repository. MUST use '--dry-run'"
" with this argument; this guards against applying unapproved patches.",
action="store_true",
dest="run_local",
)
def get_repo_list():
"""Uses adabot.circuitpython_libraries module to get a list of
CircuitPython repositories. Filters the list down to adafruit
owned/sponsored CircuitPython libraries.
"""
repo_list = []
get_repos = common_funcs.list_repos()
for repo in get_repos:
if not (
repo["owner"]["login"] == "adafruit"
and repo["name"].startswith("Adafruit_CircuitPython")
):
continue
repo_list.append(dict(name=repo["name"], url=repo["clone_url"]))
return repo_list
def get_patches(run_local):
"""Returns the list of patch files located in the adabot/patches
directory.
"""
return_list = []
if not run_local:
contents = requests.get(
"https://api.github.com/repos/adafruit/adabot/contents/patches"
)
if contents.ok:
for patch in contents.json():
patch_name = patch["name"]
return_list.append(patch_name)
else:
contents = os.listdir(patch_directory)
for file in contents:
if file.endswith(".patch"):
return_list.append(file)
return return_list
# pylint: disable=too-many-arguments
def apply_patch(repo_directory, patch_filepath, repo, patch, flags, use_apply):
"""Apply the `patch` in `patch_filepath` to the `repo` in
    `repo_directory` using git am or git apply. The commit is signed
    off by the user running the script (adabot if credentials are set
    for that).
When `use_apply` is true, the `--apply` flag is automatically added
to ensure that any passed flags that turn off apply (e.g. `--check`)
are overridden.
"""
if not os.getcwd() == repo_directory:
os.chdir(repo_directory)
if not use_apply:
try:
git.am(flags, patch_filepath)
except sh.ErrorReturnCode as err:
apply_errors.append(
dict(repo_name=repo, patch_name=patch, error=err.stderr)
)
return False
else:
apply_flags = ["--apply"]
for flag in flags:
if not flag == "--signoff":
apply_flags.append(flag)
try:
git.apply(apply_flags, patch_filepath)
except sh.ErrorReturnCode as err:
apply_errors.append(
dict(repo_name=repo, patch_name=patch, error=err.stderr)
)
return False
with open(patch_filepath) as patchfile:
for line in patchfile:
if "[PATCH]" in line:
message = '"' + line[(line.find("]") + 2) :] + '"'
break
try:
git.commit("-a", "-m", message)
except sh.ErrorReturnCode as err:
apply_errors.append(
dict(repo_name=repo, patch_name=patch, error=err.stderr)
)
return False
try:
git.push()
except sh.ErrorReturnCode as err:
apply_errors.append(dict(repo_name=repo, patch_name=patch, error=err.stderr))
return False
return True
# pylint: disable=too-many-locals,too-many-branches,too-many-statements
def check_patches(repo, patches, flags, use_apply, dry_run):
"""Gather a list of patches from the `adabot/patches` directory
on the adabot repo. Clone the `repo` and run git apply --check
    to test whether it requires any of the gathered patches.
When `use_apply` is true, any flags except `--apply` are passed
through to the check call. This ensures that the check call is
representative of the actual apply call.
"""
applied = 0
skipped = 0
failed = 0
repo_directory = lib_directory + repo["name"]
for patch in patches:
try:
os.chdir(lib_directory)
except FileNotFoundError:
os.mkdir(lib_directory)
os.chdir(lib_directory)
try:
git.clone(repo["url"])
except sh.ErrorReturnCode_128 as err:
if b"already exists" in err.stderr:
pass
else:
raise RuntimeError(err.stderr) from None
os.chdir(repo_directory)
patch_filepath = patch_directory + patch
try:
check_flags = ["--check"]
if use_apply:
for flag in flags:
if not flag in ("--apply", "--signoff"):
check_flags.append(flag)
git.apply(check_flags, patch_filepath)
run_apply = True
except sh.ErrorReturnCode_1 as err:
run_apply = False
if b"error" not in err.stderr or b"patch does not apply" in err.stderr:
parse_err = err.stderr.decode()
parse_err = parse_err[parse_err.rfind(":") + 1 : -1]
print(" . Skipping {}:{}".format(repo["name"], parse_err))
skipped += 1
else:
failed += 1
error_str = str(err.stderr, encoding="utf-8").replace("\n", " ")
error_start = error_str.rfind("error:") + 7
check_errors.append(
dict(
repo_name=repo["name"],
patch_name=patch,
error=error_str[error_start:],
)
)
except sh.ErrorReturnCode as err:
run_apply = False
failed += 1
error_str = str(err.stderr, encoding="utf-8").replace("\n", " ")
error_start = error_str.rfind("error:") + 7
check_errors.append(
dict(
repo_name=repo["name"],
patch_name=patch,
error=error_str[error_start:],
)
)
if run_apply and not dry_run:
result = apply_patch(
repo_directory, patch_filepath, repo["name"], patch, flags, use_apply
)
if result:
applied += 1
else:
failed += 1
elif run_apply and dry_run:
applied += 1
return [applied, skipped, failed]
if __name__ == "__main__":
cli_args = cli_parser.parse_args()
if cli_args.run_local:
if cli_args.dry_run or cli_args.list:
pass
else:
raise RuntimeError(
"'--local' can only be used in conjunction with"
" '--dry-run' or '--list'."
)
run_patches = get_patches(cli_args.run_local)
cmd_flags = ["--signoff"]
if cli_args.list:
print("Available Patches:", run_patches)
sys.exit()
if cli_args.patch:
if not cli_args.patch in run_patches:
raise ValueError(
"'{}' is not an available patchfile.".format(cli_args.patch)
)
run_patches = [cli_args.patch]
if cli_args.flags is not None:
if not cli_args.patch:
raise RuntimeError(
"Must be used with a single patch. See help (-h) for usage."
)
if "[-i]" in cli_args.flags:
raise ValueError("Interactive Mode flag not allowed.")
for flag_arg in cli_args.flags:
if not flag_arg == "[--signoff]":
cmd_flags.append(flag_arg.strip("[]"))
if cli_args.use_apply:
if not cli_args.patch:
raise RuntimeError(
"Must be used with a single patch. See help (-h) for usage."
)
print(".... Beginning Patch Updates ....")
print(".... Working directory:", working_directory)
print(".... Library directory:", lib_directory)
print(".... Patches directory:", patch_directory)
check_errors = []
apply_errors = []
stats = [0, 0, 0]
print(".... Deleting any previously cloned libraries")
try:
libs = os.listdir(path=lib_directory)
for lib in libs:
shutil.rmtree(lib_directory + lib)
except FileNotFoundError:
pass
repos = get_repo_list()
print(".... Running Patch Checks On", len(repos), "Repos ....")
for repository in repos:
results = check_patches(
repository,
run_patches,
cmd_flags,
cli_args.use_apply,
cli_args.dry_run
)
for k in range(3):
stats[k] += results[k]
print(".... Patch Updates Completed ....")
print(".... Patches Applied:", stats[0])
print(".... Patches Skipped:", stats[1])
print(".... Patches Failed:", stats[2], "\n")
print(".... Patch Check Failure Report ....")
if len(check_errors) > 0:
for error in check_errors:
print(
">> Repo: {0}\tPatch: {1}\n Error: {2}".format(
error["repo_name"], error["patch_name"], error["error"]
)
)
else:
print("No Failures")
print("\n")
print(".... Patch Apply Failure Report ....")
if len(apply_errors) > 0:
for error in apply_errors:
print(
">> Repo: {0}\tPatch: {1}\n Error: {2}".format(
error["repo_name"], error["patch_name"], error["error"]
)
)
else:
print("No Failures")
```
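To see how the helpers above fit together without committing or pushing anything, the check loop can be driven directly in dry-run mode. This is only a sketch: it assumes the module is importable as `adabot.circuitpython_library_patches` and that the GitHub patches listing is reachable; repositories are still cloned locally by `check_patches`.
```python
# Illustrative dry run using the functions defined above; nothing is committed
# or pushed because dry_run is True.
from adabot import circuitpython_library_patches as patches  # assumed path

repo_list = patches.get_repo_list()
patch_files = patches.get_patches(False)  # fetch the approved patch list from GitHub
for repo in repo_list[:3]:  # a few repos keeps the example quick
    applied, skipped, failed = patches.check_patches(
        repo, patch_files, ["--signoff"], False, True
    )
    print(repo["name"], "applied:", applied, "skipped:", skipped, "failed:", failed)
```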
|
{
"source": "jepler/Adafruit_CircuitPython_DisplayIO_Color_Picker",
"score": 2
}
|
#### File: Adafruit_CircuitPython_DisplayIO_Color_Picker/adafruit_displayio_color_picker/color_picker.py
```python
import math
from displayio import TileGrid, OnDiskBitmap, ColorConverter
from adafruit_displayio_layout.widgets.widget import Widget
from adafruit_displayio_layout.widgets.control import Control
__version__ = "0.0.0-auto.0"
__repo__ = (
"https://github.com/adafruit/Adafruit_CircuitPython_DisplayIO_Color_Picker.git"
)
# pylint: disable=missing-class-docstring, too-few-public-methods
class ColorPicker(Widget, Control):
"""A widget to be used to select colors from a heel.
:param str filename: name of the bitmap file to be used as a ColorPicker
:param int x: x position of the color picker origin
:param int y: y position of the color picker origin
    :param int image_size: size of the bitmap file. The bitmap colorwheels are squares.
**Quickstart: Importing and using the Color Picker**
    Here is one way of importing the ``ColorPicker`` class so you can use it:
.. code-block:: python
from adafruit_displayio_color_picker import color_picker
    Now you can create a ColorPicker at pixel position x=20, y=30 using:
.. code-block:: python
my_colorpicker=color_picker.ColorPicker(x=20, y=30)
Once you setup your display, you can now add ``my_colorpicker`` to your display using:
.. code-block:: python
display.show(my_colorpicker) # add the group to the display
If you want to have multiple display elements, you can create a group and then
    append the color picker and the other elements to the group. Then, you can add the full
group to the display as in this example:
.. code-block:: python
my_colorpicker= ColorPicker(20, 30)
my_group = displayio.Group(max_size=2) # make a group that can hold 2 items
my_group.append(my_colorpicker) # Add my_colorpicker to the group
#
# Append other display elements to the group
#
display.show(my_group) # add the group to the display
**Final Notes**
    Depending on the screen, results may vary. The resolution of the bitmap will not be the
    same as on a PC. The sensitivity of the screen could also affect the behaviour of the library.
**The Color Picker Widget**
.. figure:: color_picker.png
:scale: 100 %
:align: center
:alt: Example of the color picker widget.
      Example of the color picker. Representation will vary according to the screen used.
"""
# pylint: disable=too-many-lines, too-many-instance-attributes, too-many-arguments
# pylint: disable=too-many-locals, too-many-statements
def __init__(
self,
filename: str = None,
x: int = 0,
y: int = 0,
image_size: int = 100,
**kwargs,
):
Widget.__init__(
self, x=x, y=y, height=image_size, width=image_size, **kwargs, max_size=4
)
Control.__init__(self)
self._file = open(filename, "rb")
image = OnDiskBitmap(self._file)
tile_grid = TileGrid(image, pixel_shader=ColorConverter())
self._image_size = image_size
self.append(tile_grid)
self.touch_boundary = (
0,
0,
image.width,
image.height,
)
def contains(self, touch_point): # overrides, then calls Control.contains(x,y)
"""Checks if the ColorPicker was touched. Returns True if the touch_point is
within the ColorPicker's touch_boundary.
:param touch_point: x,y location of the screen, converted to local coordinates.
:type touch_point: Tuple[x,y]
:return: Boolean
"""
touch_x = (
touch_point[0] - self.x
) # adjust touch position for the local position
touch_y = touch_point[1] - self.y
return super().contains((touch_x, touch_y, 0))
def when_selected(self, touch_point, screen_height):
"""Response function when ColorPicker is selected. When selected, the ColorPicker
        will give the color corresponding to the touched position.
:param touch_point: x,y location of the screen, in absolute display coordinates.
:param int screen_height: screen height
:return: Color
"""
touch_x = (
touch_point[0] - self.x
) # adjust touch position for the local position
touch_y = screen_height - touch_point[1] - self.y
# Call the parent's .selected function in case there is any work up there.
# touch_point is adjusted for group's x,y position before sending to super()
super().selected((touch_x, touch_y, 0))
return self._color_from_position(touch_x, touch_y, self._image_size)
def _color_from_position(self, x, y, image_size):
img_half = image_size // 2
dist = abs(math.sqrt((x - img_half) ** 2 + (y - img_half) ** 2))
if x - img_half == 0:
angle = -90
if y > img_half:
angle = 90
else:
angle = math.atan2((y - img_half), (x - img_half)) * 180 / math.pi
angle = (angle + 30) % 360
shade = 1 * dist / img_half
idx = angle / 60
base = int(round(idx))
adj = (6 + base + (-1 if base > idx else 1)) % 6
ratio = max(idx, base) - min(idx, base)
color = self._make_color(base, adj, ratio, shade)
return color
@staticmethod
def _make_color(base, adj, ratio, shade):
"""
        Go through each channel of the colors, adjusting blue with blue, red with red,
        green with green, etc.
"""
color_wheel = [
[0xFF, 0x00, 0xFF],
[0xFF, 0x00, 0x00],
[0xFF, 0xFF, 0x00],
[0x00, 0xFF, 0x00],
[0x00, 0xFF, 0xFF],
[0x00, 0x00, 0xFF],
[0xFF, 0x00, 0xFF],
]
output = 0x0
bit = 0
for pos in range(3):
base_chan = color_wheel[base][pos]
adj_chan = color_wheel[adj][pos]
new_chan = int(round(base_chan * (1 - ratio) + adj_chan * ratio))
# now alter the channel by the shade
if shade < 1:
new_chan = new_chan * shade
elif shade > 1:
shade_ratio = shade - 1
new_chan = (0xFF * shade_ratio) + (new_chan * (1 - shade_ratio))
output = output + (int(new_chan) << bit)
bit = bit + 8
return output
```
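The geometry inside `_color_from_position()` is easier to follow outside of the widget. The sketch below (plain Python, no `displayio` needed) repeats the same distance/angle/shade computation for a hypothetical 100-pixel wheel, so you can see how a touch location turns into a hue index and a shade factor:
```python
import math

IMAGE_SIZE = 100  # illustrative wheel size, matching the constructor default above


def hue_index_and_shade(x, y, image_size=IMAGE_SIZE):
    """Mirror of the widget's math: returns (hue index 0-6, shade factor)."""
    half = image_size // 2
    dist = abs(math.sqrt((x - half) ** 2 + (y - half) ** 2))
    if x - half == 0:
        angle = 90 if y > half else -90
    else:
        angle = math.atan2(y - half, x - half) * 180 / math.pi
    angle = (angle + 30) % 360
    return angle / 60, dist / half  # one color-wheel segment spans 60 degrees


print(hue_index_and_shade(75, 50))  # a touch to the right of the wheel center
```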
|
{
"source": "jepler/Adafruit_CircuitPython_OV7670",
"score": 2
}
|
#### File: jepler/Adafruit_CircuitPython_OV7670/adafruit_ov7670.py
```python
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ov7670.git"
import time
import digitalio
import imagecapture
import pwmio
from adafruit_bus_device.i2c_device import I2CDevice
from micropython import const
# Supported color formats
OV7670_COLOR_RGB = 0 # RGB565 big-endian
OV7670_COLOR_YUV = 1 # YUV/YCbCr 4:2:2 big-endian
# Supported sizes (VGA division factor) for OV7670_set_size()
OV7670_SIZE_DIV1 = 0 # 640 x 480
OV7670_SIZE_DIV2 = 1 # 320 x 240
OV7670_SIZE_DIV4 = 2 # 160 x 120
OV7670_SIZE_DIV8 = 3 # 80 x 60
OV7670_SIZE_DIV16 = 4 # 40 x 30
# Test patterns
OV7670_TEST_PATTERN_NONE = 0 # Disable test pattern
OV7670_TEST_PATTERN_SHIFTING_1 = 1 # "Shifting 1" pattern
OV7670_TEST_PATTERN_COLOR_BAR = 2 # 8 color bars
OV7670_TEST_PATTERN_COLOR_BAR_FADE = 3 # Color bars w/fade to white
# Table of bit patterns for the different supported night modes.
# There's a "same frame rate" option for OV7670 night mode but it
# doesn't seem to do anything useful and can be skipped over.
OV7670_NIGHT_MODE_OFF = 0 # Disable night mode
OV7670_NIGHT_MODE_2 = 0b10100000 # Night mode 1/2 frame rate
OV7670_NIGHT_MODE_4 = 0b11000000 # Night mode 1/4 frame rate
OV7670_NIGHT_MODE_8 = 0b11100000 # Night mode 1/8 frame rate
OV7670_ADDR = 0x21 # Default I2C address if unspecified
_OV7670_REG_GAIN = const(0x00) # AGC gain bits 7:0 (9:8 in VREF)
_OV7670_REG_BLUE = const(0x01) # AWB blue channel gain
_OV7670_REG_RED = const(0x02) # AWB red channel gain
_OV7670_REG_VREF = const(0x03) # Vert frame control bits
_OV7670_REG_COM1 = const(0x04) # Common control 1
_OV7670_COM1_R656 = const(0x40) # COM1 enable R656 format
_OV7670_REG_BAVE = const(0x05) # U/B average level
_OV7670_REG_GbAVE = const(0x06) # Y/Gb average level
_OV7670_REG_AECHH = const(0x07) # Exposure value - AEC 15:10 bits
_OV7670_REG_RAVE = const(0x08) # V/R average level
_OV7670_REG_COM2 = const(0x09) # Common control 2
_OV7670_COM2_SSLEEP = const(0x10) # COM2 soft sleep mode
_OV7670_REG_PID = const(0x0A) # Product ID MSB (read-only)
_OV7670_REG_VER = const(0x0B) # Product ID LSB (read-only)
_OV7670_REG_COM3 = const(0x0C) # Common control 3
_OV7670_COM3_SWAP = const(0x40) # COM3 output data MSB/LSB swap
_OV7670_COM3_SCALEEN = const(0x08) # COM3 scale enable
_OV7670_COM3_DCWEN = const(0x04) # COM3 DCW enable
_OV7670_REG_COM4 = const(0x0D) # Common control 4
_OV7670_REG_COM5 = const(0x0E) # Common control 5
_OV7670_REG_COM6 = const(0x0F) # Common control 6
_OV7670_REG_AECH = const(0x10) # Exposure value 9:2
_OV7670_REG_CLKRC = const(0x11) # Internal clock
_OV7670_CLK_EXT = const(0x40) # CLKRC Use ext clock directly
_OV7670_CLK_SCALE = const(0x3F) # CLKRC Int clock prescale mask
_OV7670_REG_COM7 = const(0x12) # Common control 7
_OV7670_COM7_RESET = const(0x80) # COM7 SCCB register reset
_OV7670_COM7_SIZE_MASK = const(0x38) # COM7 output size mask
_OV7670_COM7_PIXEL_MASK = const(0x05) # COM7 output pixel format mask
_OV7670_COM7_SIZE_VGA = const(0x00) # COM7 output size VGA
_OV7670_COM7_SIZE_CIF = const(0x20) # COM7 output size CIF
_OV7670_COM7_SIZE_QVGA = const(0x10) # COM7 output size QVGA
_OV7670_COM7_SIZE_QCIF = const(0x08) # COM7 output size QCIF
_OV7670_COM7_RGB = const(0x04) # COM7 pixel format RGB
_OV7670_COM7_YUV = const(0x00) # COM7 pixel format YUV
_OV7670_COM7_BAYER = const(0x01) # COM7 pixel format Bayer RAW
_OV7670_COM7_PBAYER = const(0x05) # COM7 pixel fmt proc Bayer RAW
_OV7670_COM7_COLORBAR = const(0x02) # COM7 color bar enable
_OV7670_REG_COM8 = const(0x13) # Common control 8
_OV7670_COM8_FASTAEC = const(0x80) # COM8 Enable fast AGC/AEC algo,
_OV7670_COM8_AECSTEP = const(0x40) # COM8 AEC step size unlimited
_OV7670_COM8_BANDING = const(0x20) # COM8 Banding filter enable
_OV7670_COM8_AGC = const(0x04) # COM8 AGC (auto gain) enable
_OV7670_COM8_AWB = const(0x02) # COM8 AWB (auto white balance)
_OV7670_COM8_AEC = const(0x01) # COM8 AEC (auto exposure) enable
_OV7670_REG_COM9 = const(0x14) # Common control 9 - max AGC value
_OV7670_REG_COM10 = const(0x15) # Common control 10
_OV7670_COM10_HSYNC = const(0x40) # COM10 HREF changes to HSYNC
_OV7670_COM10_PCLK_HB = const(0x20) # COM10 Suppress PCLK on hblank
_OV7670_COM10_HREF_REV = const(0x08) # COM10 HREF reverse
_OV7670_COM10_VS_EDGE = const(0x04) # COM10 VSYNC chg on PCLK rising
_OV7670_COM10_VS_NEG = const(0x02) # COM10 VSYNC negative
_OV7670_COM10_HS_NEG = const(0x01) # COM10 HSYNC negative
_OV7670_REG_HSTART = const(0x17) # Horiz frame start high bits
_OV7670_REG_HSTOP = const(0x18) # Horiz frame end high bits
_OV7670_REG_VSTART = const(0x19) # Vert frame start high bits
_OV7670_REG_VSTOP = const(0x1A) # Vert frame end high bits
_OV7670_REG_PSHFT = const(0x1B) # Pixel delay select
_OV7670_REG_MIDH = const(0x1C) # Manufacturer ID high byte
_OV7670_REG_MIDL = const(0x1D) # Manufacturer ID low byte
_OV7670_REG_MVFP = const(0x1E) # Mirror / vert-flip enable
_OV7670_MVFP_MIRROR = const(0x20) # MVFP Mirror image
_OV7670_MVFP_VFLIP = const(0x10) # MVFP Vertical flip
_OV7670_REG_LAEC = const(0x1F) # Reserved
_OV7670_REG_ADCCTR0 = const(0x20) # ADC control
_OV7670_REG_ADCCTR1 = const(0x21) # Reserved
_OV7670_REG_ADCCTR2 = const(0x22) # Reserved
_OV7670_REG_ADCCTR3 = const(0x23) # Reserved
_OV7670_REG_AEW = const(0x24) # AGC/AEC upper limit
_OV7670_REG_AEB = const(0x25) # AGC/AEC lower limit
_OV7670_REG_VPT = const(0x26) # AGC/AEC fast mode op region
_OV7670_REG_BBIAS = const(0x27) # B channel signal output bias
_OV7670_REG_GbBIAS = const(0x28) # Gb channel signal output bias
_OV7670_REG_EXHCH = const(0x2A) # Dummy pixel insert MSB
_OV7670_REG_EXHCL = const(0x2B) # Dummy pixel insert LSB
_OV7670_REG_RBIAS = const(0x2C) # R channel signal output bias
_OV7670_REG_ADVFL = const(0x2D) # Insert dummy lines MSB
_OV7670_REG_ADVFH = const(0x2E) # Insert dummy lines LSB
_OV7670_REG_YAVE = const(0x2F) # Y/G channel average value
_OV7670_REG_HSYST = const(0x30) # HSYNC rising edge delay
_OV7670_REG_HSYEN = const(0x31) # HSYNC falling edge delay
_OV7670_REG_HREF = const(0x32) # HREF control
_OV7670_REG_CHLF = const(0x33) # Array current control
_OV7670_REG_ARBLM = const(0x34) # Array ref control - reserved
_OV7670_REG_ADC = const(0x37) # ADC control - reserved
_OV7670_REG_ACOM = const(0x38) # ADC & analog common - reserved
_OV7670_REG_OFON = const(0x39) # ADC offset control - reserved
_OV7670_REG_TSLB = const(0x3A) # Line buffer test option
_OV7670_TSLB_NEG = const(0x20) # TSLB Negative image enable
_OV7670_TSLB_YLAST = const(0x04) # TSLB UYVY or VYUY, see COM13
_OV7670_TSLB_AOW = const(0x01) # TSLB Auto output window
_OV7670_REG_COM11 = const(0x3B) # Common control 11
_OV7670_COM11_NIGHT = const(0x80) # COM11 Night mode
_OV7670_COM11_NMFR = const(0x60) # COM11 Night mode frame rate mask
_OV7670_COM11_HZAUTO = const(0x10) # COM11 Auto detect 50/60 Hz
_OV7670_COM11_BAND = const(0x08) # COM11 Banding filter val select
_OV7670_COM11_EXP = const(0x02) # COM11 Exposure timing control
_OV7670_REG_COM12 = const(0x3C) # Common control 12
_OV7670_COM12_HREF = const(0x80) # COM12 Always has HREF
_OV7670_REG_COM13 = const(0x3D) # Common control 13
_OV7670_COM13_GAMMA = const(0x80) # COM13 Gamma enable
_OV7670_COM13_UVSAT = const(0x40) # COM13 UV saturation auto adj
_OV7670_COM13_UVSWAP = const(0x01) # COM13 UV swap, use w TSLB[3]
_OV7670_REG_COM14 = const(0x3E) # Common control 14
_OV7670_COM14_DCWEN = const(0x10) # COM14 DCW & scaling PCLK enable
_OV7670_REG_EDGE = const(0x3F) # Edge enhancement adjustment
_OV7670_REG_COM15 = const(0x40) # Common control 15
_OV7670_COM15_RMASK = const(0xC0) # COM15 Output range mask
_OV7670_COM15_R10F0 = const(0x00) # COM15 Output range 10 to F0
_OV7670_COM15_R01FE = const(0x80) # COM15 01 to FE
_OV7670_COM15_R00FF = const(0xC0) # COM15 00 to FF
_OV7670_COM15_RGBMASK = const(0x30) # COM15 RGB 555/565 option mask
_OV7670_COM15_RGB = const(0x00) # COM15 Normal RGB out
_OV7670_COM15_RGB565 = const(0x10) # COM15 RGB 565 output
_OV7670_COM15_RGB555 = const(0x30) # COM15 RGB 555 output
_OV7670_REG_COM16 = const(0x41) # Common control 16
_OV7670_COM16_AWBGAIN = const(0x08) # COM16 AWB gain enable
_OV7670_REG_COM17 = const(0x42) # Common control 17
_OV7670_COM17_AECWIN = const(0xC0) # COM17 AEC window must match COM4
_OV7670_COM17_CBAR = const(0x08) # COM17 DSP Color bar enable
_OV7670_REG_AWBC1 = const(0x43) # Reserved
_OV7670_REG_AWBC2 = const(0x44) # Reserved
_OV7670_REG_AWBC3 = const(0x45) # Reserved
_OV7670_REG_AWBC4 = const(0x46) # Reserved
_OV7670_REG_AWBC5 = const(0x47) # Reserved
_OV7670_REG_AWBC6 = const(0x48) # Reserved
_OV7670_REG_REG4B = const(0x4B) # UV average enable
_OV7670_REG_DNSTH = const(0x4C) # De-noise strength
_OV7670_REG_MTX1 = const(0x4F) # Matrix coefficient 1
_OV7670_REG_MTX2 = const(0x50) # Matrix coefficient 2
_OV7670_REG_MTX3 = const(0x51) # Matrix coefficient 3
_OV7670_REG_MTX4 = const(0x52) # Matrix coefficient 4
_OV7670_REG_MTX5 = const(0x53) # Matrix coefficient 5
_OV7670_REG_MTX6 = const(0x54) # Matrix coefficient 6
_OV7670_REG_BRIGHT = const(0x55) # Brightness control
_OV7670_REG_CONTRAS = const(0x56) # Contrast control
_OV7670_REG_CONTRAS_CENTER = const(0x57) # Contrast center
_OV7670_REG_MTXS = const(0x58) # Matrix coefficient sign
_OV7670_REG_LCC1 = const(0x62) # Lens correction option 1
_OV7670_REG_LCC2 = const(0x63) # Lens correction option 2
_OV7670_REG_LCC3 = const(0x64) # Lens correction option 3
_OV7670_REG_LCC4 = const(0x65) # Lens correction option 4
_OV7670_REG_LCC5 = const(0x66) # Lens correction option 5
_OV7670_REG_MANU = const(0x67) # Manual U value
_OV7670_REG_MANV = const(0x68) # Manual V value
_OV7670_REG_GFIX = const(0x69) # Fix gain control
_OV7670_REG_GGAIN = const(0x6A) # G channel AWB gain
_OV7670_REG_DBLV = const(0x6B) # PLL & regulator control
_OV7670_REG_AWBCTR3 = const(0x6C) # AWB control 3
_OV7670_REG_AWBCTR2 = const(0x6D) # AWB control 2
_OV7670_REG_AWBCTR1 = const(0x6E) # AWB control 1
_OV7670_REG_AWBCTR0 = const(0x6F) # AWB control 0
_OV7670_REG_SCALING_XSC = const(0x70) # Test pattern X scaling
_OV7670_REG_SCALING_YSC = const(0x71) # Test pattern Y scaling
_OV7670_REG_SCALING_DCWCTR = const(0x72) # DCW control
_OV7670_REG_SCALING_PCLK_DIV = const(0x73) # DSP scale control clock divide
_OV7670_REG_REG74 = const(0x74) # Digital gain control
_OV7670_REG_REG76 = const(0x76) # Pixel correction
_OV7670_REG_SLOP = const(0x7A) # Gamma curve highest seg slope
_OV7670_REG_GAM_BASE = const(0x7B) # Gamma register base (1 of 15)
_OV7670_GAM_LEN = const(15) # Number of gamma registers
_OV7670_R76_BLKPCOR = const(0x80) # REG76 black pixel corr enable
_OV7670_R76_WHTPCOR = const(0x40) # REG76 white pixel corr enable
_OV7670_REG_RGB444 = const(0x8C) # RGB 444 control
_OV7670_R444_ENABLE = const(0x02) # RGB444 enable
_OV7670_R444_RGBX = const(0x01) # RGB444 word format
_OV7670_REG_DM_LNL = const(0x92) # Dummy line LSB
_OV7670_REG_LCC6 = const(0x94) # Lens correction option 6
_OV7670_REG_LCC7 = const(0x95) # Lens correction option 7
_OV7670_REG_HAECC1 = const(0x9F) # Histogram-based AEC/AGC ctrl 1
_OV7670_REG_HAECC2 = const(0xA0) # Histogram-based AEC/AGC ctrl 2
_OV7670_REG_SCALING_PCLK_DELAY = const(0xA2) # Scaling pixel clock delay
_OV7670_REG_BD50MAX = const(0xA5) # 50 Hz banding step limit
_OV7670_REG_HAECC3 = const(0xA6) # Histogram-based AEC/AGC ctrl 3
_OV7670_REG_HAECC4 = const(0xA7) # Histogram-based AEC/AGC ctrl 4
_OV7670_REG_HAECC5 = const(0xA8) # Histogram-based AEC/AGC ctrl 5
_OV7670_REG_HAECC6 = const(0xA9) # Histogram-based AEC/AGC ctrl 6
_OV7670_REG_HAECC7 = const(0xAA) # Histogram-based AEC/AGC ctrl 7
_OV7670_REG_BD60MAX = const(0xAB) # 60 Hz banding step limit
_OV7670_REG_ABLC1 = const(0xB1) # ABLC enable
_OV7670_REG_THL_ST = const(0xB3) # ABLC target
_OV7670_REG_SATCTR = const(0xC9) # Saturation control
_OV7670_REG_LAST = const(_OV7670_REG_SATCTR) # Maximum register address
# Manual output format, RGB, use RGB565 and full 0-255 output range
_OV7670_rgb = bytes(
[
_OV7670_REG_COM7,
_OV7670_COM7_RGB,
_OV7670_REG_RGB444,
0,
_OV7670_REG_COM15,
_OV7670_COM15_RGB565 | _OV7670_COM15_R00FF,
]
)
# Manual output format, YUV, use full output range
_OV7670_yuv = bytes(
[
_OV7670_REG_COM7,
_OV7670_COM7_YUV,
_OV7670_REG_COM15,
_OV7670_COM15_R00FF,
]
)
_OV7670_init = bytes(
[
_OV7670_REG_TSLB,
_OV7670_TSLB_YLAST, # No auto window
_OV7670_REG_COM10,
_OV7670_COM10_VS_NEG, # -VSYNC (req by SAMD PCC)
_OV7670_REG_SLOP,
0x20,
_OV7670_REG_GAM_BASE,
0x1C,
_OV7670_REG_GAM_BASE + 1,
0x28,
_OV7670_REG_GAM_BASE + 2,
0x3C,
_OV7670_REG_GAM_BASE + 3,
0x55,
_OV7670_REG_GAM_BASE + 4,
0x68,
_OV7670_REG_GAM_BASE + 5,
0x76,
_OV7670_REG_GAM_BASE + 6,
0x80,
_OV7670_REG_GAM_BASE + 7,
0x88,
_OV7670_REG_GAM_BASE + 8,
0x8F,
_OV7670_REG_GAM_BASE + 9,
0x96,
_OV7670_REG_GAM_BASE + 10,
0xA3,
_OV7670_REG_GAM_BASE + 11,
0xAF,
_OV7670_REG_GAM_BASE + 12,
0xC4,
_OV7670_REG_GAM_BASE + 13,
0xD7,
_OV7670_REG_GAM_BASE + 14,
0xE8,
_OV7670_REG_COM8,
_OV7670_COM8_FASTAEC | _OV7670_COM8_AECSTEP | _OV7670_COM8_BANDING,
_OV7670_REG_GAIN,
0x00,
_OV7670_COM2_SSLEEP,
0x00,
_OV7670_REG_COM4,
0x00,
_OV7670_REG_COM9,
0x20, # Max AGC value
_OV7670_REG_BD50MAX,
0x05,
_OV7670_REG_BD60MAX,
0x07,
_OV7670_REG_AEW,
0x75,
_OV7670_REG_AEB,
0x63,
_OV7670_REG_VPT,
0xA5,
_OV7670_REG_HAECC1,
0x78,
_OV7670_REG_HAECC2,
0x68,
0xA1,
0x03, # Reserved register?
_OV7670_REG_HAECC3,
0xDF, # Histogram-based AEC/AGC setup
_OV7670_REG_HAECC4,
0xDF,
_OV7670_REG_HAECC5,
0xF0,
_OV7670_REG_HAECC6,
0x90,
_OV7670_REG_HAECC7,
0x94,
_OV7670_REG_COM8,
_OV7670_COM8_FASTAEC
| _OV7670_COM8_AECSTEP
| _OV7670_COM8_BANDING
| _OV7670_COM8_AGC
| _OV7670_COM8_AEC,
_OV7670_REG_COM5,
0x61,
_OV7670_REG_COM6,
0x4B,
0x16,
0x02, # Reserved register?
_OV7670_REG_MVFP,
0x07, # 0x07,
_OV7670_REG_ADCCTR1,
0x02,
_OV7670_REG_ADCCTR2,
0x91,
0x29,
0x07, # Reserved register?
_OV7670_REG_CHLF,
0x0B,
0x35,
0x0B, # Reserved register?
_OV7670_REG_ADC,
0x1D,
_OV7670_REG_ACOM,
0x71,
_OV7670_REG_OFON,
0x2A,
_OV7670_REG_COM12,
0x78,
0x4D,
0x40, # Reserved register?
0x4E,
0x20, # Reserved register?
_OV7670_REG_GFIX,
0x5D,
_OV7670_REG_REG74,
0x19,
0x8D,
0x4F, # Reserved register?
0x8E,
0x00, # Reserved register?
0x8F,
0x00, # Reserved register?
0x90,
0x00, # Reserved register?
0x91,
0x00, # Reserved register?
_OV7670_REG_DM_LNL,
0x00,
0x96,
0x00, # Reserved register?
0x9A,
0x80, # Reserved register?
0xB0,
0x84, # Reserved register?
_OV7670_REG_ABLC1,
0x0C,
0xB2,
0x0E, # Reserved register?
_OV7670_REG_THL_ST,
0x82,
0xB8,
0x0A, # Reserved register?
_OV7670_REG_AWBC1,
0x14,
_OV7670_REG_AWBC2,
0xF0,
_OV7670_REG_AWBC3,
0x34,
_OV7670_REG_AWBC4,
0x58,
_OV7670_REG_AWBC5,
0x28,
_OV7670_REG_AWBC6,
0x3A,
0x59,
0x88, # Reserved register?
0x5A,
0x88, # Reserved register?
0x5B,
0x44, # Reserved register?
0x5C,
0x67, # Reserved register?
0x5D,
0x49, # Reserved register?
0x5E,
0x0E, # Reserved register?
_OV7670_REG_LCC3,
0x04,
_OV7670_REG_LCC4,
0x20,
_OV7670_REG_LCC5,
0x05,
_OV7670_REG_LCC6,
0x04,
_OV7670_REG_LCC7,
0x08,
_OV7670_REG_AWBCTR3,
0x0A,
_OV7670_REG_AWBCTR2,
0x55,
_OV7670_REG_MTX1,
0x80,
_OV7670_REG_MTX2,
0x80,
_OV7670_REG_MTX3,
0x00,
_OV7670_REG_MTX4,
0x22,
_OV7670_REG_MTX5,
0x5E,
_OV7670_REG_MTX6,
0x80, # 0x40?
_OV7670_REG_AWBCTR1,
0x11,
_OV7670_REG_AWBCTR0,
0x9F, # Or use 0x9E for advance AWB
_OV7670_REG_BRIGHT,
0x00,
_OV7670_REG_CONTRAS,
0x40,
_OV7670_REG_CONTRAS_CENTER,
0x80, # 0x40?
]
)
_window = [
[9, 162, 2, 2], # SIZE_DIV1 640x480 VGA
[10, 174, 4, 2], # SIZE_DIV2 320x240 QVGA
[11, 186, 2, 2], # SIZE_DIV4 160x120 QQVGA
[12, 210, 0, 2], # SIZE_DIV8 80x60 ...
[15, 252, 3, 2], # SIZE_DIV16 40x30
]
class OV7670: # pylint: disable=too-many-instance-attributes
"""Library for the OV7670 digital camera"""
def __init__(
self,
i2c_bus,
data0,
clock,
vsync,
href,
shutdown=None,
reset=None,
mclk=None,
mclk_frequency=16_000_000,
colorspace=OV7670_COLOR_RGB,
i2c_address=0x21,
): # pylint: disable=too-many-arguments
"""
Args:
i2c_bus (busio.I2C): The I2C bus used to configure the OV7670
i2c_address (int): The I2C address of the camera
data0 (microcontroller.Pin): The first of 8 parallel data capture pins
clock (microcontroller.Pin): The pixel clock from the OV7670
vsync (microcontroller.Pin): The vsync signal from the OV7670
href (microcontroller.Pin): The href signal from the OV7670
shutdown: The microcontroller.Pin that controls the camera's \
shutdown signal, also called the powerdown or enable pin, or \
None
reset: The microcontroller.Pin that controls the camera's reset \
signal, or enable pin, or None
mclk: The pin on which to create a master clock signal, or None
mclk_frequency: The frequency of the master clock to generate, \
ignored if mclk is None
colorspace: The colorspace to operate in
size: The size of image to capture
"""
# Initialize the master clock
if mclk:
self._mclk_pwm = pwmio.PWMOut(mclk, frequency=mclk_frequency)
self._mclk_pwm.duty_cycle = 32768
else:
self._mclk_pwm = None
if shutdown:
self._shutdown = digitalio.DigitalInOut(shutdown)
self._shutdown.switch_to_output(True)
time.sleep(0.001)
self._shutdown.switch_to_output(False)
time.sleep(0.3)
else:
self._shutdown = None
if reset:
self._reset = digitalio.DigitalInOut(reset)
self._reset.switch_to_output(False)
time.sleep(0.001)
self._reset.switch_to_output(True)
self._i2c_device = I2CDevice(i2c_bus, i2c_address)
if not reset:
self._write_register(_OV7670_REG_COM7, _OV7670_COM7_RESET)
time.sleep(0.001)
self._colorspace = None
self.colorspace = colorspace
self._write_list(_OV7670_init)
self._size = None
self.size = OV7670_SIZE_DIV8
self._test_pattern = None
self.test_pattern = OV7670_TEST_PATTERN_NONE
self._flip_x = False
self._flip_y = False
self._night = OV7670_NIGHT_MODE_OFF
self._imagecapture = imagecapture.ParallelImageCapture(
data0=data0, clock=clock, vsync=vsync, href=href
)
def capture(self, buf):
"""Capture an image into the buffer."""
self._imagecapture.capture(buf)
@property
def mclk_frequency(self):
"""Get the actual frequency the generated mclk, or None"""
return self._mclk_pwm.frequency if self._mclk_pwm else None
@property
def width(self):
"""Get the image width in pixels. A buffer of 2*width*height bytes \
stores a whole image."""
return 640 >> self._size
@property
def height(self):
"""Get the image height in pixels. A buffer of 2*width*height bytes \
stores a whole image."""
return 480 >> self._size
@property
def colorspace(self):
"""Get or set the colorspace"""
return self._colorspace
@colorspace.setter
def colorspace(self, colorspace):
self._colorspace = colorspace
self._write_list(_OV7670_rgb if colorspace == OV7670_COLOR_RGB else _OV7670_yuv)
def deinit(self):
"""Deinitialize the camera"""
if self._mclk_pwm:
self._mclk_pwm.deinit()
if self._shutdown:
self._shutdown.deinit()
if self._reset:
self._reset.deinit()
@property
def size(self):
"""Get or set the captured image size"""
return self._size
@size.setter
def size(self, size):
self._frame_control(size, *_window[size])
self._size = size
@property
def test_pattern(self):
"""Get or set the test pattern"""
return self._test_pattern
@test_pattern.setter
def test_pattern(self, pattern):
# Modify only test pattern bits (not scaling bits)
xsc = self._read_register(_OV7670_REG_SCALING_XSC) & ~0x80
ysc = self._read_register(_OV7670_REG_SCALING_YSC) & ~0x80
if pattern & 1:
xsc |= 0x80
        if pattern & 2:
ysc |= 0x80
# Write modified result back to SCALING_XSC and SCALING_YSC
self._write_register(_OV7670_REG_SCALING_XSC, xsc)
self._write_register(_OV7670_REG_SCALING_YSC, ysc)
def _set_flip(self):
mvfp = self._read_register(_OV7670_REG_MVFP)
if self._flip_x:
mvfp |= _OV7670_MVFP_MIRROR
else:
mvfp &= ~_OV7670_MVFP_MIRROR
if self._flip_y:
mvfp |= _OV7670_MVFP_VFLIP
else:
mvfp &= ~_OV7670_MVFP_VFLIP
self._write_register(_OV7670_REG_MVFP, mvfp)
@property
def flip_x(self):
"""Get or set the X-flip flag"""
return self._flip_x
@flip_x.setter
def flip_x(self, value):
self._flip_x = bool(value)
self._set_flip()
@property
def flip_y(self):
"""Get or set the Y-flip flag"""
return self._flip_y
@flip_y.setter
def flip_y(self, value):
self._flip_y = bool(value)
self._set_flip()
@property
def night(self):
"""Get or set the night-vision mode"""
return self._night
@night.setter
def night(self, value):
com11 = self._read_register(_OV7670_REG_COM11)
com11 = (com11 & 0b00011111) | value
self._write_register(_OV7670_REG_COM11, com11)
self._night = value
@property
def product_id(self):
"""Get the product id (PID) register"""
return self._read_register(_OV7670_REG_PID)
@property
def product_version(self):
"""Get the version (VER) register"""
return self._read_register(_OV7670_REG_VER)
def _write_list(self, reg_list):
for i in range(0, len(reg_list), 2):
self._write_register(reg_list[i], reg_list[i + 1])
time.sleep(0.001)
def _write_register(self, reg, value):
b = bytearray(2)
b[0] = reg
b[1] = value
with self._i2c_device as i2c:
i2c.write(b)
def _read_register(self, reg):
b = bytearray(1)
b[0] = reg
with self._i2c_device as i2c:
i2c.write(b)
i2c.readinto(b)
return b[0]
def _frame_control(
self, size, vstart, hstart, edge_offset, pclk_delay
): # pylint: disable=too-many-arguments
# Enable downsampling if sub-VGA, and zoom if 1:16 scale
value = _OV7670_COM3_DCWEN if (size > OV7670_SIZE_DIV1) else 0
if size == OV7670_SIZE_DIV16:
value |= _OV7670_COM3_SCALEEN
self._write_register(_OV7670_REG_COM3, value)
# Enable PCLK division if sub-VGA 2,4,8,16 = 0x19,1A,1B,1C
value = (0x18 + size) if (size > OV7670_SIZE_DIV1) else 0
self._write_register(_OV7670_REG_COM14, value)
# Horiz/vert downsample ratio, 1:8 max (H,V are always equal for now)
value = size if (size <= OV7670_SIZE_DIV8) else OV7670_SIZE_DIV8
self._write_register(_OV7670_REG_SCALING_DCWCTR, value * 0x11)
# Pixel clock divider if sub-VGA
value = (0xF0 + size) if (size > OV7670_SIZE_DIV1) else 0x08
self._write_register(_OV7670_REG_SCALING_PCLK_DIV, value)
# Apply 0.5 digital zoom at 1:16 size (others are downsample only)
value = 0x40 if (size == OV7670_SIZE_DIV16) else 0x20 # 0.5, 1.0
# Read current SCALING_XSC and SCALING_YSC register values because
# test pattern settings are also stored in those registers and we
# don't want to corrupt anything there.
xsc = self._read_register(_OV7670_REG_SCALING_XSC)
ysc = self._read_register(_OV7670_REG_SCALING_YSC)
xsc = (xsc & 0x80) | value # Modify only scaling bits (not test pattern)
ysc = (ysc & 0x80) | value
# Write modified result back to SCALING_XSC and SCALING_YSC
self._write_register(_OV7670_REG_SCALING_XSC, xsc)
self._write_register(_OV7670_REG_SCALING_YSC, ysc)
# Window size is scattered across multiple registers.
# Horiz/vert stops can be automatically calc'd from starts.
vstop = vstart + 480
hstop = (hstart + 640) % 784
self._write_register(_OV7670_REG_HSTART, hstart >> 3)
self._write_register(_OV7670_REG_HSTOP, hstop >> 3)
self._write_register(
_OV7670_REG_HREF,
(edge_offset << 6) | ((hstop & 0b111) << 3) | (hstart & 0b111),
)
self._write_register(_OV7670_REG_VSTART, vstart >> 2)
self._write_register(_OV7670_REG_VSTOP, vstop >> 2)
self._write_register(_OV7670_REG_VREF, ((vstop & 0b11) << 2) | (vstart & 0b11))
self._write_register(_OV7670_REG_SCALING_PCLK_DELAY, pclk_delay)
```
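A minimal capture sketch under stated assumptions: every pin name below is a placeholder for whatever your board's parallel-capture wiring actually uses, and the buffer size follows the 2 * width * height rule noted in the `width`/`height` docstrings:
```python
import board
import busio
from adafruit_ov7670 import OV7670, OV7670_SIZE_DIV16

# All pin assignments here are hypothetical; substitute your own wiring.
cam = OV7670(
    busio.I2C(board.SCL, board.SDA),
    data0=board.D0,   # first of 8 consecutive parallel data pins
    clock=board.D9,
    vsync=board.D10,
    href=board.D11,
    mclk=board.D12,
    shutdown=board.D13,
)
cam.size = OV7670_SIZE_DIV16  # 40 x 30
buf = bytearray(2 * cam.width * cam.height)  # RGB565 = 2 bytes per pixel
cam.capture(buf)
```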
|
{
"source": "jepler/Adafruit_CircuitPython_Pypixelbuf",
"score": 2
}
|
#### File: Adafruit_CircuitPython_Pypixelbuf/examples/adafruit_pypixelbuf_simpletest.py
```python
import adafruit_pypixelbuf
class TestBuf(adafruit_pypixelbuf.PixelBuf):
called = False
def show(self):
self.called = True
buffer = TestBuf(20, bytearray(20 * 3), "RGB", 1.0, auto_write=True)
buffer[0] = (1, 2, 3)
print(buffer[0])
print(buffer[0:2])
print(buffer[0:2:2])
print(buffer.called)
```
|
{
"source": "jepler/Adafruit_CircuitPython_Register",
"score": 3
}
|
#### File: Adafruit_CircuitPython_Register/examples/register_simpletest.py
```python
from board import SCL, SDA
from busio import I2C
from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_register.i2c_struct import Struct
DEVICE_ADDRESS = 0x40 # device address of PCA9685 board
A_DEVICE_REGISTER = 0x06 # PWM 0 control register on the PCA9685 board
class DeviceControl: # pylint: disable-msg=too-few-public-methods
def __init__(self, i2c):
self.i2c_device = i2c # self.i2c_device required by Struct class
tuple_of_numbers = Struct(A_DEVICE_REGISTER, "<HH") # 2 16-bit numbers
# The following is for I2C communications
comm_port = I2C(SCL, SDA)
device = I2CDevice(comm_port, DEVICE_ADDRESS)
registers = DeviceControl(device)
# set the bits in the device
registers.tuple_of_numbers = (0, 0x00FF)
# display the device values for the bits
print("register 1: {}; register 2: {}".format(*registers.tuple_of_numbers))
# toggle the bits
registers.tuple_of_numbers = (0x1000, 0)
# display the device values for the bits
print("register 1: {}; register 2: {}".format(*registers.tuple_of_numbers))
```
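The same descriptor pattern extends to single-value registers. Assuming your copy of adafruit_register also provides `UnaryStruct` in `adafruit_register.i2c_struct`, a one-byte field can be declared like this (the register address is purely illustrative):
```python
from adafruit_register.i2c_struct import UnaryStruct


class SingleRegister:  # pylint: disable-msg=too-few-public-methods
    def __init__(self, i2c):
        self.i2c_device = i2c  # required by the register descriptors

    mode = UnaryStruct(0x00, "<B")  # hypothetical 8-bit register at address 0x00


# regs = SingleRegister(device)  # reuse the I2CDevice created above
# regs.mode = 0x01
```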
|
{
"source": "jepler/Adafruit_CircuitPython_Requests",
"score": 3
}
|
#### File: Adafruit_CircuitPython_Requests/tests/legacy_mocket.py
```python
from unittest import mock
SOCK_STREAM = 0
set_interface = mock.Mock()
interface = mock.MagicMock()
getaddrinfo = mock.Mock()
socket = mock.Mock()
class Mocket:
def __init__(self, response):
self.settimeout = mock.Mock()
self.close = mock.Mock()
self.connect = mock.Mock()
self.send = mock.Mock(side_effect=self._send)
self.readline = mock.Mock(side_effect=self._readline)
self.recv = mock.Mock(side_effect=self._recv)
self.fail_next_send = False
self._response = response
self._position = 0
def _send(self, data):
if self.fail_next_send:
self.fail_next_send = False
raise RuntimeError("Send failed")
return None
def _readline(self):
i = self._response.find(b"\r\n", self._position)
r = self._response[self._position : i + 2]
self._position = i + 2
return r
def _recv(self, count):
end = self._position + count
r = self._response[self._position : end]
self._position = end
print(r)
return r
```
#### File: Adafruit_CircuitPython_Requests/tests/parse_test.py
```python
from unittest import mock
import mocket
import json
import adafruit_requests
ip = "1.2.3.4"
host = "httpbin.org"
response = {"Date": "July 25, 2019"}
encoded = json.dumps(response).encode("utf-8")
# Padding here tests the case where a header line is exactly 32 bytes buffered by
# aligning the Content-Type header after it.
headers = "HTTP/1.0 200 OK\r\npadding: 000\r\nContent-Type: application/json\r\nContent-Length: {}\r\n\r\n".format(
len(encoded)
).encode(
"utf-8"
)
def test_json():
pool = mocket.MocketPool()
pool.getaddrinfo.return_value = ((None, None, None, None, (ip, 80)),)
sock = mocket.Mocket(headers + encoded)
pool.socket.return_value = sock
s = adafruit_requests.Session(pool)
r = s.get("http://" + host + "/get")
sock.connect.assert_called_once_with((ip, 80))
assert r.json() == response
```
|
{
"source": "jepler/Adafruit_CircuitPython_seesaw",
"score": 2
}
|
#### File: Adafruit_CircuitPython_seesaw/adafruit_seesaw/seesaw.py
```python
import time
try:
import struct
except ImportError:
import ustruct as struct
from micropython import const
from adafruit_bus_device.i2c_device import I2CDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_seesaw.git"
_STATUS_BASE = const(0x00)
_GPIO_BASE = const(0x01)
_SERCOM0_BASE = const(0x02)
_TIMER_BASE = const(0x08)
_ADC_BASE = const(0x09)
_DAC_BASE = const(0x0A)
_INTERRUPT_BASE = const(0x0B)
_DAP_BASE = const(0x0C)
_EEPROM_BASE = const(0x0D)
_NEOPIXEL_BASE = const(0x0E)
_TOUCH_BASE = const(0x0F)
_GPIO_DIRSET_BULK = const(0x02)
_GPIO_DIRCLR_BULK = const(0x03)
_GPIO_BULK = const(0x04)
_GPIO_BULK_SET = const(0x05)
_GPIO_BULK_CLR = const(0x06)
_GPIO_BULK_TOGGLE = const(0x07)
_GPIO_INTENSET = const(0x08)
_GPIO_INTENCLR = const(0x09)
_GPIO_INTFLAG = const(0x0A)
_GPIO_PULLENSET = const(0x0B)
_GPIO_PULLENCLR = const(0x0C)
_STATUS_HW_ID = const(0x01)
_STATUS_VERSION = const(0x02)
_STATUS_OPTIONS = const(0x03)
_STATUS_TEMP = const(0x04)
_STATUS_SWRST = const(0x7F)
_TIMER_STATUS = const(0x00)
_TIMER_PWM = const(0x01)
_TIMER_FREQ = const(0x02)
_ADC_STATUS = const(0x00)
_ADC_INTEN = const(0x02)
_ADC_INTENCLR = const(0x03)
_ADC_WINMODE = const(0x04)
_ADC_WINTHRESH = const(0x05)
_ADC_CHANNEL_OFFSET = const(0x07)
_SERCOM_STATUS = const(0x00)
_SERCOM_INTEN = const(0x02)
_SERCOM_INTENCLR = const(0x03)
_SERCOM_BAUD = const(0x04)
_SERCOM_DATA = const(0x05)
_NEOPIXEL_STATUS = const(0x00)
_NEOPIXEL_PIN = const(0x01)
_NEOPIXEL_SPEED = const(0x02)
_NEOPIXEL_BUF_LENGTH = const(0x03)
_NEOPIXEL_BUF = const(0x04)
_NEOPIXEL_SHOW = const(0x05)
_TOUCH_CHANNEL_OFFSET = const(0x10)
_HW_ID_CODE = const(0x55)
_EEPROM_I2C_ADDR = const(0x3F)
#TODO: update when we get real PID
_CRICKIT_PID = const(9999)
_ROBOHATMM1_PID = const(9998)
class Seesaw:
"""Driver for Seesaw i2c generic conversion trip
:param ~busio.I2C i2c_bus: Bus the SeeSaw is connected to
:param int addr: I2C address of the SeeSaw device"""
INPUT = const(0x00)
OUTPUT = const(0x01)
INPUT_PULLUP = const(0x02)
INPUT_PULLDOWN = const(0x03)
def __init__(self, i2c_bus, addr=0x49, drdy=None):
self._drdy = drdy
if drdy is not None:
drdy.switch_to_input()
self.i2c_device = I2CDevice(i2c_bus, addr)
self.sw_reset()
def sw_reset(self):
"""Trigger a software reset of the SeeSaw chip"""
self.write8(_STATUS_BASE, _STATUS_SWRST, 0xFF)
time.sleep(.500)
chip_id = self.read8(_STATUS_BASE, _STATUS_HW_ID)
if chip_id != _HW_ID_CODE:
raise RuntimeError("Seesaw hardware ID returned (0x{:x}) is not "
"correct! Expected 0x{:x}. Please check your wiring."
.format(chip_id, _HW_ID_CODE))
pid = self.get_version() >> 16
if pid == _CRICKIT_PID:
from adafruit_seesaw.crickit import Crickit_Pinmap
self.pin_mapping = Crickit_Pinmap
elif pid == _ROBOHATMM1_PID:
from adafruit_seesaw.robohat import MM1_Pinmap
self.pin_mapping = MM1_Pinmap
else:
from adafruit_seesaw.samd09 import SAMD09_Pinmap
self.pin_mapping = SAMD09_Pinmap
def get_options(self):
buf = bytearray(4)
self.read(_STATUS_BASE, _STATUS_OPTIONS, buf)
ret = struct.unpack(">I", buf)[0]
return ret
def get_version(self):
buf = bytearray(4)
self.read(_STATUS_BASE, _STATUS_VERSION, buf)
ret = struct.unpack(">I", buf)[0]
return ret
def pin_mode(self, pin, mode):
if pin >= 32:
self.pin_mode_bulk_b(1 << (pin - 32), mode)
else:
self.pin_mode_bulk(1 << pin, mode)
def digital_write(self, pin, value):
if pin >= 32:
self.digital_write_bulk_b(1 << (pin - 32), value)
else:
self.digital_write_bulk(1 << pin, value)
def digital_read(self, pin):
if pin >= 32:
return self.digital_read_bulk_b((1 << (pin - 32))) != 0
return self.digital_read_bulk((1 << pin)) != 0
def digital_read_bulk(self, pins):
buf = bytearray(4)
self.read(_GPIO_BASE, _GPIO_BULK, buf)
buf[0] = buf[0] & 0x3F
ret = struct.unpack(">I", buf)[0]
return ret & pins
def digital_read_bulk_b(self, pins):
buf = bytearray(8)
self.read(_GPIO_BASE, _GPIO_BULK, buf)
ret = struct.unpack(">I", buf[4:])[0]
return ret & pins
def set_GPIO_interrupts(self, pins, enabled):
cmd = struct.pack(">I", pins)
if enabled:
self.write(_GPIO_BASE, _GPIO_INTENSET, cmd)
else:
self.write(_GPIO_BASE, _GPIO_INTENCLR, cmd)
def analog_read(self, pin):
buf = bytearray(2)
if pin not in self.pin_mapping.analog_pins:
raise ValueError("Invalid ADC pin")
self.read(_ADC_BASE, _ADC_CHANNEL_OFFSET + self.pin_mapping.analog_pins.index(pin), buf)
ret = struct.unpack(">H", buf)[0]
time.sleep(.001)
return ret
def touch_read(self, pin):
buf = bytearray(2)
if pin not in self.pin_mapping.touch_pins:
raise ValueError("Invalid touch pin")
self.read(_TOUCH_BASE, _TOUCH_CHANNEL_OFFSET + self.pin_mapping.touch_pins.index(pin), buf)
ret = struct.unpack(">H", buf)[0]
return ret
def moisture_read(self):
buf = bytearray(2)
self.read(_TOUCH_BASE, _TOUCH_CHANNEL_OFFSET, buf, .005)
ret = struct.unpack(">H", buf)[0]
time.sleep(.001)
# retry if reading was bad
count = 0
while ret > 4095:
self.read(_TOUCH_BASE, _TOUCH_CHANNEL_OFFSET, buf, .005)
ret = struct.unpack(">H", buf)[0]
time.sleep(.001)
count += 1
if count > 3:
raise RuntimeError("Could not get a valid moisture reading.")
return ret
def _pin_mode_bulk_x(self, capacity, offset, pins, mode):
cmd = bytearray(capacity)
cmd[offset:] = struct.pack(">I", pins)
if mode == self.OUTPUT:
self.write(_GPIO_BASE, _GPIO_DIRSET_BULK, cmd)
elif mode == self.INPUT:
self.write(_GPIO_BASE, _GPIO_DIRCLR_BULK, cmd)
elif mode == self.INPUT_PULLUP:
self.write(_GPIO_BASE, _GPIO_DIRCLR_BULK, cmd)
self.write(_GPIO_BASE, _GPIO_PULLENSET, cmd)
self.write(_GPIO_BASE, _GPIO_BULK_SET, cmd)
elif mode == self.INPUT_PULLDOWN:
self.write(_GPIO_BASE, _GPIO_DIRCLR_BULK, cmd)
self.write(_GPIO_BASE, _GPIO_PULLENSET, cmd)
self.write(_GPIO_BASE, _GPIO_BULK_CLR, cmd)
else:
raise ValueError("Invalid pin mode")
def pin_mode_bulk(self, pins, mode):
self._pin_mode_bulk_x(4, 0, pins, mode)
def pin_mode_bulk_b(self, pins, mode):
self._pin_mode_bulk_x(8, 4, pins, mode)
def digital_write_bulk(self, pins, value):
cmd = struct.pack(">I", pins)
if value:
self.write(_GPIO_BASE, _GPIO_BULK_SET, cmd)
else:
self.write(_GPIO_BASE, _GPIO_BULK_CLR, cmd)
def digital_write_bulk_b(self, pins, value):
cmd = bytearray(8)
cmd[4:] = struct.pack(">I", pins)
if value:
self.write(_GPIO_BASE, _GPIO_BULK_SET, cmd)
else:
self.write(_GPIO_BASE, _GPIO_BULK_CLR, cmd)
def analog_write(self, pin, value):
pin_found = False
if self.pin_mapping.pwm_width == 16:
if pin in self.pin_mapping.pwm_pins:
pin_found = True
cmd = bytearray([self.pin_mapping.pwm_pins.index(pin), (value >> 8), value & 0xFF])
else:
if pin in self.pin_mapping.pwm_pins:
pin_found = True
cmd = bytearray([self.pin_mapping.pwm_pins.index(pin), value])
if pin_found is False:
raise ValueError("Invalid PWM pin")
self.write(_TIMER_BASE, _TIMER_PWM, cmd)
time.sleep(.001)
def get_temp(self):
buf = bytearray(4)
self.read(_STATUS_BASE, _STATUS_TEMP, buf, .005)
buf[0] = buf[0] & 0x3F
ret = struct.unpack(">I", buf)[0]
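        # Added note (not in the original driver): the temperature register is
        # assumed to hold a 16.16 fixed-point Celsius value, hence the ~1/65536
        # (0.0000152587890625) scale factor applied below.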
return 0.00001525878 * ret
def set_pwm_freq(self, pin, freq):
if pin in self.pin_mapping.pwm_pins:
cmd = bytearray([self.pin_mapping.pwm_pins.index(pin), (freq >> 8), freq & 0xFF])
self.write(_TIMER_BASE, _TIMER_FREQ, cmd)
else:
raise ValueError("Invalid PWM pin")
# def enable_sercom_data_rdy_interrupt(self, sercom):
#
# _sercom_inten.DATA_RDY = 1
# self.write8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_INTEN, _sercom_inten.get())
#
#
# def disable_sercom_data_rdy_interrupt(self, sercom):
#
# _sercom_inten.DATA_RDY = 0
# self.write8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_INTEN, _sercom_inten.get())
#
#
# def read_sercom_data(self, sercom):
#
# return self.read8(SEESAW_SERCOM0_BASE + sercom, SEESAW_SERCOM_DATA)
def set_i2c_addr(self, addr):
self.eeprom_write8(_EEPROM_I2C_ADDR, addr)
time.sleep(.250)
self.i2c_device.device_address = addr
self.sw_reset()
def get_i2c_addr(self):
return self.read8(_EEPROM_BASE, _EEPROM_I2C_ADDR)
def eeprom_write8(self, addr, val):
self.eeprom_write(addr, bytearray([val]))
def eeprom_write(self, addr, buf):
self.write(_EEPROM_BASE, addr, buf)
def eeprom_read8(self, addr):
return self.read8(_EEPROM_BASE, addr)
def uart_set_baud(self, baud):
cmd = struct.pack(">I", baud)
self.write(_SERCOM0_BASE, _SERCOM_BAUD, cmd)
def write8(self, reg_base, reg, value):
self.write(reg_base, reg, bytearray([value]))
def read8(self, reg_base, reg):
ret = bytearray(1)
self.read(reg_base, reg, ret)
return ret[0]
def read(self, reg_base, reg, buf, delay=.005):
self.write(reg_base, reg)
if self._drdy is not None:
while self._drdy.value is False:
pass
else:
time.sleep(delay)
with self.i2c_device as i2c:
i2c.readinto(buf)
def write(self, reg_base, reg, buf=None):
full_buffer = bytearray([reg_base, reg])
if buf is not None:
full_buffer += buf
if self._drdy is not None:
while self._drdy.value is False:
pass
with self.i2c_device as i2c:
i2c.write(full_buffer)
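# Minimal usage sketch, added for illustration and not part of the original file.
# It assumes the published adafruit_seesaw package and an I2C seesaw board such as
# the soil moisture sensor at address 0x36; adjust the import and address to your setup.
#     import board
#     from adafruit_seesaw.seesaw import Seesaw
#     ss = Seesaw(board.I2C(), addr=0x36)
#     print("moisture:", ss.moisture_read())
#     print("temperature (C):", ss.get_temp())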
```
|
{
"source": "jepler/Adafruit_Learning_System_Guides",
"score": 3
}
|
#### File: CircuitPython_Templates/i2s_find_pins/code.py
```python
import board
import audiobusio
from microcontroller import Pin
def is_hardware_i2s(bit_clock, word_select, data):
try:
p = audiobusio.I2SOut(bit_clock, word_select, data)
p.deinit()
return True
except ValueError:
return False
def get_unique_pins():
exclude = [
getattr(board, p)
for p in [
# This is not an exhaustive list of unexposed pins. Your results
# may include other pins that you cannot easily connect to.
"NEOPIXEL",
"DOTSTAR_CLOCK",
"DOTSTAR_DATA",
"APA102_SCK",
"APA102_MOSI",
"LED",
"SWITCH",
"BUTTON",
]
if p in dir(board)
]
pins = [
pin
for pin in [getattr(board, p) for p in dir(board)]
if isinstance(pin, Pin) and pin not in exclude
]
unique = []
for p in pins:
if p not in unique:
unique.append(p)
return unique
for bit_clock_pin in get_unique_pins():
for word_select_pin in get_unique_pins():
for data_pin in get_unique_pins():
if bit_clock_pin is word_select_pin or bit_clock_pin is data_pin or word_select_pin \
is data_pin:
continue
if is_hardware_i2s(bit_clock_pin, word_select_pin, data_pin):
print("Bit clock pin:", bit_clock_pin, "\t Word select pin:", word_select_pin,
"\t Data pin:", data_pin)
else:
pass
```
#### File: Smart_Alarm_Clock/button/code.py
```python
import ssl
import time
import board
import digitalio
import socketpool
import wifi
import adafruit_minimqtt.adafruit_minimqtt as MQTT
from adafruit_io.adafruit_io import IO_MQTT
led = digitalio.DigitalInOut(board.IO8)
led.direction = digitalio.Direction.OUTPUT
btn1 = digitalio.DigitalInOut(board.IO9)
btn1.direction = digitalio.Direction.INPUT
btn1.pull = digitalio.Pull.DOWN
ALARM = None
### WiFi ###
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi secrets are kept in secrets.py, please add them there!")
raise
print("Connecting to %s" % secrets["ssid"])
wifi.radio.connect(secrets["ssid"], secrets["password"])
print("Connected to %s!" % secrets["ssid"])
# Define callback functions which will be called when certain events happen.
# pylint: disable=unused-argument
def connected(client):
# Connected function will be called when the client is connected to Adafruit IO.
# This is a good place to subscribe to feed changes. The client parameter
# passed to this function is the Adafruit IO MQTT client so you can make
# calls against it easily.
print("Connected to Adafruit IO!")
client.subscribe("alarm-clock.alarm")
def subscribe(client, userdata, topic, granted_qos):
# This method is called when the client subscribes to a new feed.
print("Subscribed to {0} with QOS level {1}".format(topic, granted_qos))
def unsubscribe(client, userdata, topic, pid):
# This method is called when the client unsubscribes from a feed.
print("Unsubscribed from {0} with PID {1}".format(topic, pid))
# pylint: disable=unused-argument
def disconnected(client):
# Disconnected function will be called when the client disconnects.
print("Disconnected from Adafruit IO!")
# pylint: disable=unused-argument
def message(client, feed_id, payload):
# Message function will be called when a subscribed feed has a new value.
# The feed_id parameter identifies the feed, and the payload parameter has
# the new value.
print("Feed {0} received new value: {1}".format(feed_id, payload))
def on_alarm(client, feed_id, payload):
global ALARM # pylint: disable=global-statement
print(payload)
ALARM = eval(payload) # pylint: disable=eval-used
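    # Added note: eval() executes whatever arrives on the feed. A safer, assumed
    # equivalent for this True/False payload would be a plain string check, e.g.:
    #     ALARM = payload.strip().lower() in ("true", "1")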
# Create a socket pool
pool = socketpool.SocketPool(wifi.radio)
# Initialize a new MQTT Client object
mqtt_client = MQTT.MQTT(
broker="io.adafruit.com",
username=secrets["aio_username"],
password=secrets["aio_key"],
socket_pool=pool,
ssl_context=ssl.create_default_context(),
)
# Initialize an Adafruit IO MQTT Client
io = IO_MQTT(mqtt_client)
# Connect the callback methods defined above to Adafruit IO
io.on_connect = connected
io.on_disconnect = disconnected
io.on_subscribe = subscribe
io.on_unsubscribe = unsubscribe
io.on_message = message
io.add_feed_callback("alarm-clock.alarm", on_alarm)
# Connect to Adafruit IO
print("Connecting to Adafruit IO...")
io.connect()
io.get("alarm-clock.alarm")
LAST = 0
while True:
io.loop()
if ALARM and time.monotonic() - LAST >= 0.2:
led.value = not led.value
LAST = time.monotonic()
if btn1.value:
io.publish("alarm-clock.alarm", "False")
led.value = False
led.value = True
time.sleep(1)
led.value = False
```
#### File: Adafruit_Learning_System_Guides/Tilemap_Game_With_CircuitPython/code.py
```python
import time
import random
import gc
import board
import displayio
import adafruit_imageload
import ugame
import terminalio
from adafruit_display_text import label
from tilegame_assets.tiles import TILES
from tilegame_assets.states import (
STATE_PLAYING,
STATE_MAPWIN,
STATE_WAITING,
STATE_LOST_SPARKY,
STATE_MINERVA,
)
from tilegame_assets.fun_facts import FACTS
from tilegame_assets.text_helper import wrap_nicely
# pylint: disable=bad-continuation
# Direction constants for comparison
UP = 0
DOWN = 1
RIGHT = 2
LEFT = 3
# how long to wait between rendering frames
FPS_DELAY = 1 / 60
# how many tiles can fit on the screen. Tiles are 16x16 pixels
SCREEN_HEIGHT_TILES = 8
SCREEN_WIDTH_TILES = 10
# list of maps in order they should be played
MAPS = ["map0.csv", "map1.csv"]
GAME_STATE = {
# hold the map state as it came out of the csv. Only holds non-entities.
"ORIGINAL_MAP": {},
# hold the current map state as it changes. Only holds non-entities.
"CURRENT_MAP": {},
    # Dictionary with tuple keys that map to lists of entity objects.
# Each one has the index of the sprite in the ENTITY_SPRITES list
# and the tile type string
"ENTITY_SPRITES_DICT": {},
# hold the location of the player in tile coordinates
"PLAYER_LOC": (0, 0),
# list of items the player has in inventory
"INVENTORY": [],
# how many hearts there are in this map level
"TOTAL_HEARTS": 0,
# sprite object to draw for the player
"PLAYER_SPRITE": None,
# size of the map
"MAP_WIDTH": 0,
"MAP_HEIGHT": 0,
# which map level within MAPS we are currently playing
"MAP_INDEX": 0,
# current state of the state machine
"STATE": STATE_PLAYING,
}
# dictionary with tuple keys that map to tile type values
# e.g. {(0,0): "left_wall", (1,1): "floor"}
CAMERA_VIEW = {}
# how far offset the camera is from the GAME_STATE['CURRENT_MAP']
# used to determine where things are at in the camera view vs. the MAP
CAMERA_OFFSET_X = 0
CAMERA_OFFSET_Y = 0
# list of sprite objects, one for each entity
ENTITY_SPRITES = []
# list of entities that need to be on the screen currently based on the camera view
NEED_TO_DRAW_ENTITIES = []
def get_tile(coords):
"""
:param coords: (x, y) tuple
:return: tile name of the tile at the given coords from GAME_STATE['CURRENT_MAP']
"""
return GAME_STATE["CURRENT_MAP"][coords[0], coords[1]]
def get_tile_obj(coords):
"""
:param coords: (x, y) tuple
:return: tile object with stats and behavior for the tile at the given coords.
"""
return TILES[GAME_STATE["CURRENT_MAP"][coords[0], coords[1]]]
#
def is_tile_moveable(tile_coords):
"""
Check the can_walk property of the tile at the given coordinates
:param tile_coords: (x, y) tuple
:return: True if the player can walk on this tile. False otherwise.
"""
return TILES[GAME_STATE["CURRENT_MAP"][tile_coords[0], tile_coords[1]]]["can_walk"]
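# Illustrative sketch, not taken from tilegame_assets/tiles.py: judging by the keys
# accessed in this file, a TILES entry is assumed to look roughly like
#     TILES["floor"] = {"sprite_index": 10, "can_walk": True}
#     TILES["heart"] = {"sprite_index": 5, "can_walk": True, "entity": True,
#                       "before_move": some_pickup_function}  # hypothetical names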
print("after funcs {}".format(gc.mem_free()))
# display object variable
display = board.DISPLAY
# Load the sprite sheet (bitmap)
sprite_sheet, palette = adafruit_imageload.load(
"tilegame_assets/sprite_sheet.bmp",
bitmap=displayio.Bitmap,
palette=displayio.Palette,
)
# make green be transparent so entities can be drawn on top of map tiles
palette.make_transparent(0)
# Create the castle TileGrid
castle = displayio.TileGrid(
sprite_sheet,
pixel_shader=palette,
width=10,
height=8,
tile_width=16,
tile_height=16,
)
# Create a Group to hold the sprite and castle
group = displayio.Group()
# Add castle to the group
group.append(castle)
def load_map(file_name):
# pylint: disable=global-statement,too-many-statements,too-many-nested-blocks,too-many-branches
global ENTITY_SPRITES, CAMERA_VIEW
# empty the sprites from the group
for cur_s in ENTITY_SPRITES:
group.remove(cur_s)
# remove player sprite
try:
group.remove(GAME_STATE["PLAYER_SPRITE"])
except ValueError:
pass
# reset map and other game state objects
GAME_STATE["ORIGINAL_MAP"] = {}
GAME_STATE["CURRENT_MAP"] = {}
ENTITY_SPRITES = []
GAME_STATE["ENTITY_SPRITES_DICT"] = {}
CAMERA_VIEW = {}
GAME_STATE["INVENTORY"] = []
GAME_STATE["TOTAL_HEARTS"] = 0
# Open and read raw string from the map csv file
f = open("tilegame_assets/{}".format(file_name), "r")
map_csv_str = f.read()
f.close()
# split the raw string into lines
map_csv_lines = map_csv_str.replace("\r", "").split("\n")
# set the WIDTH and HEIGHT variables.
# this assumes the map is rectangular.
GAME_STATE["MAP_HEIGHT"] = len(map_csv_lines)
GAME_STATE["MAP_WIDTH"] = len(map_csv_lines[0].split(","))
# loop over each line storing index in y variable
for y, line in enumerate(map_csv_lines):
# ignore empty line
if line != "":
# loop over each tile type separated by commas, storing index in x variable
for x, tile_name in enumerate(line.split(",")):
print("%s '%s'" % (len(tile_name), str(tile_name)))
# if the tile exists in our main dictionary
if tile_name in TILES.keys():
# if the tile is an entity
if (
"entity" in TILES[tile_name].keys()
and TILES[tile_name]["entity"]
):
# set the map tiles to floor
GAME_STATE["ORIGINAL_MAP"][x, y] = "floor"
GAME_STATE["CURRENT_MAP"][x, y] = "floor"
if tile_name == "heart":
GAME_STATE["TOTAL_HEARTS"] += 1
# if it's the player
if tile_name == "player":
# Create the sprite TileGrid
GAME_STATE["PLAYER_SPRITE"] = displayio.TileGrid(
sprite_sheet,
pixel_shader=palette,
width=1,
height=1,
tile_width=16,
tile_height=16,
default_tile=TILES[tile_name]["sprite_index"],
)
# set the position of sprite on screen
GAME_STATE["PLAYER_SPRITE"].x = x * 16
GAME_STATE["PLAYER_SPRITE"].y = y * 16
# set position in x,y tile coords for reference later
GAME_STATE["PLAYER_LOC"] = (x, y)
# add sprite to the group
group.append(GAME_STATE["PLAYER_SPRITE"])
else: # not the player
# Create the sprite TileGrid
                            entity_sprite = displayio.TileGrid(
                                sprite_sheet,
                                pixel_shader=palette,
                                width=1,
                                height=1,
                                tile_width=16,
                                tile_height=16,
                                default_tile=TILES[tile_name]["sprite_index"],
                            )
                            # set the position of sprite on screen
                            # default to off the edge
                            entity_sprite.x = -16
                            entity_sprite.y = -16
                            # add the sprite object to ENTITY_SPRITES list
                            ENTITY_SPRITES.append(entity_sprite)
# print("setting GAME_STATE['ENTITY_SPRITES_DICT'][%s,%s]" % (x,y))
# create an entity obj
_entity_obj = {
"entity_sprite_index": len(ENTITY_SPRITES) - 1,
"map_tile_name": tile_name,
}
# if there are no entities at this location yet
if (x, y) not in GAME_STATE["ENTITY_SPRITES_DICT"]:
# create a list and add it to the dictionary at the x,y location
GAME_STATE["ENTITY_SPRITES_DICT"][x, y] = [_entity_obj]
else:
# append the entity to the existing list in the dictionary
GAME_STATE["ENTITY_SPRITES_DICT"][x, y].append(
_entity_obj
)
else: # tile is not entity
# set the tile_name into MAP dictionaries
GAME_STATE["ORIGINAL_MAP"][x, y] = tile_name
GAME_STATE["CURRENT_MAP"][x, y] = tile_name
else: # tile type wasn't found in dict
print("tile: %s not found in TILES dict" % tile_name)
# add all entity sprites to the group
print("appending {} sprites".format(len(ENTITY_SPRITES)))
for entity in ENTITY_SPRITES:
group.append(entity)
print("loading map")
load_map(MAPS[GAME_STATE["MAP_INDEX"]])
# Add the Group to the Display
display.show(group)
# variables to store previous value of button state
prev_up = False
prev_down = False
prev_left = False
prev_right = False
# helper function returns true if player is allowed to move in the given direction
# based on can_walk property of the tiles next to the player
def can_player_move(direction):
try:
if direction == UP:
tile_above_coords = (
GAME_STATE["PLAYER_LOC"][0],
GAME_STATE["PLAYER_LOC"][1] - 1,
)
return TILES[
GAME_STATE["CURRENT_MAP"][tile_above_coords[0], tile_above_coords[1]]
]["can_walk"]
if direction == DOWN:
tile_below_coords = (
GAME_STATE["PLAYER_LOC"][0],
GAME_STATE["PLAYER_LOC"][1] + 1,
)
return TILES[
GAME_STATE["CURRENT_MAP"][tile_below_coords[0], tile_below_coords[1]]
]["can_walk"]
if direction == LEFT:
tile_left_of_coords = (
GAME_STATE["PLAYER_LOC"][0] - 1,
GAME_STATE["PLAYER_LOC"][1],
)
return TILES[
GAME_STATE["CURRENT_MAP"][
tile_left_of_coords[0], tile_left_of_coords[1]
]
]["can_walk"]
if direction == RIGHT:
tile_right_of_coords = (
GAME_STATE["PLAYER_LOC"][0] + 1,
GAME_STATE["PLAYER_LOC"][1],
)
return TILES[
GAME_STATE["CURRENT_MAP"][
tile_right_of_coords[0], tile_right_of_coords[1]
]
]["can_walk"]
except KeyError:
return False
return None
# set the appropriate tiles into the CAMERA_VIEW dictionary
# based on given starting coords and size
def set_camera_view(startX, startY, width, height):
# pylint: disable=global-statement
global CAMERA_OFFSET_X
global CAMERA_OFFSET_Y
# set the offset variables for use in other parts of the code
CAMERA_OFFSET_X = startX
CAMERA_OFFSET_Y = startY
# loop over the rows and indexes in the desired size section
for y_index, y in enumerate(range(startY, startY + height)):
# loop over columns and indexes in the desired size section
for x_index, x in enumerate(range(startX, startX + width)):
# print("setting camera_view[%s,%s]" % (x_index,y_index))
try:
# set the tile at the current coordinate of the MAP into the CAMERA_VIEW
CAMERA_VIEW[x_index, y_index] = GAME_STATE["CURRENT_MAP"][x, y]
except KeyError:
# if coordinate is out of bounds set it to floor by default
CAMERA_VIEW[x_index, y_index] = "floor"
# draw the current CAMERA_VIEW dictionary and the GAME_STATE['ENTITY_SPRITES_DICT']
def draw_camera_view():
# list that will hold all entities that have been drawn based on their MAP location
# any entities not in this list should get moved off the screen
drew_entities = []
# print(CAMERA_VIEW)
# pylint: disable=too-many-nested-blocks
# loop over y tile coordinates
for y in range(0, SCREEN_HEIGHT_TILES):
# loop over x tile coordinates
for x in range(0, SCREEN_WIDTH_TILES):
# tile name at this location
tile_name = CAMERA_VIEW[x, y]
# if tile exists in the main dictionary
if tile_name in TILES.keys():
# if there are entity(s) at this location
if (x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y) in GAME_STATE[
"ENTITY_SPRITES_DICT"
]:
# default background for entities is floor
castle[x, y] = TILES["floor"]["sprite_index"]
# if it's not the player
if tile_name != "player":
# loop over all entities at this location
for entity_obj_at_tile in GAME_STATE["ENTITY_SPRITES_DICT"][
x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y
]:
# set appropriate x,y screen coordinates
# based on tile coordinates
ENTITY_SPRITES[
int(entity_obj_at_tile["entity_sprite_index"])
].x = (x * 16)
ENTITY_SPRITES[
int(entity_obj_at_tile["entity_sprite_index"])
].y = (y * 16)
                            # add the index of the entity sprite to the drew_entities
                            # list so we know not to hide it later.
drew_entities.append(
entity_obj_at_tile["entity_sprite_index"]
)
else: # no entities at this location
# set the sprite index of this tile into the CASTLE dictionary
castle[x, y] = TILES[tile_name]["sprite_index"]
else: # tile type not found in main dictionary
# default to floor tile
castle[x, y] = TILES["floor"]["sprite_index"]
# if the player is at this x,y tile coordinate accounting for camera offset
if GAME_STATE["PLAYER_LOC"] == ((x + CAMERA_OFFSET_X, y + CAMERA_OFFSET_Y)):
# set player sprite screen coordinates
GAME_STATE["PLAYER_SPRITE"].x = x * 16
GAME_STATE["PLAYER_SPRITE"].y = y * 16
# loop over all entity sprites
for index in range(0, len(ENTITY_SPRITES)):
# if the sprite wasn't drawn then it's outside the camera view
if index not in drew_entities:
# hide the sprite by moving it off screen
ENTITY_SPRITES[index].x = int(-16)
ENTITY_SPRITES[index].y = int(-16)
# variable to store timestamp of last drawn frame
last_update_time = 0
# variables to store movement offset values
x_offset = 0
y_offset = 0
def show_splash(new_text, color, vertical_offset=18):
text_area.text = ""
text_area.text = new_text
text_area.anchor_point = (0, 0)
text_area.anchored_position = (0, vertical_offset)
text_area.color = color
group.append(splash)
# Make the splash context
splash = displayio.Group()
# CircuitPython 6 & 7 compatible
# game message background bmp file
game_message_background = open("tilegame_assets/game_message_background.bmp", "rb")
odb = displayio.OnDiskBitmap(game_message_background)
bg_grid = displayio.TileGrid(odb, pixel_shader=getattr(odb, 'pixel_shader', displayio.ColorConverter()))
# # CircuitPython 7+ compatible
# game message background bmp file
# odb = displayio.OnDiskBitmap("tilegame_assets/game_message_background.bmp")
# bg_grid = displayio.TileGrid(odb, pixel_shader=odb.pixel_shader)
splash.append(bg_grid)
# Text for the message
text_group = displayio.Group(x=14, y=8)
text_area = label.Label(terminalio.FONT, text=" " * 180, color=0xD39AE5)
text_group.append(text_area)
splash.append(text_group)
# main loop
while True:
# set the current button values into variables
cur_btn_vals = ugame.buttons.get_pressed()
cur_up = cur_btn_vals & ugame.K_UP
cur_down = cur_btn_vals & ugame.K_DOWN
cur_right = cur_btn_vals & ugame.K_RIGHT
cur_left = cur_btn_vals & ugame.K_LEFT
cur_a = cur_btn_vals & ugame.K_O or cur_btn_vals & ugame.K_X
if GAME_STATE["STATE"] == STATE_WAITING:
print(cur_a)
if cur_a:
GAME_STATE["STATE"] = STATE_PLAYING
group.remove(splash)
if GAME_STATE["STATE"] == STATE_PLAYING:
# check for up button press / release
if not cur_up and prev_up:
if can_player_move(UP):
x_offset = 0
y_offset = -1
# check for down button press / release
if not cur_down and prev_down:
if can_player_move(DOWN):
x_offset = 0
y_offset = 1
# check for right button press / release
if not cur_right and prev_right:
if can_player_move(RIGHT):
x_offset = 1
y_offset = 0
# check for left button press / release
if not cur_left and prev_left:
if can_player_move(LEFT):
x_offset = -1
y_offset = 0
# if any offset is not zero then we need to process player movement
if x_offset != 0 or y_offset != 0:
# variable to store if player is allowed to move
can_move = False
# coordinates the player is moving to
moving_to_coords = (
GAME_STATE["PLAYER_LOC"][0] + x_offset,
GAME_STATE["PLAYER_LOC"][1] + y_offset,
)
# tile name of the spot player is moving to
moving_to_tile_name = GAME_STATE["CURRENT_MAP"][
moving_to_coords[0], moving_to_coords[1]
]
# if there are entity(s) at spot the player is moving to
if moving_to_coords in GAME_STATE["ENTITY_SPRITES_DICT"]:
print("found entity(s) where we are moving to")
# loop over all entities at the location player is moving to
for entity_obj in GAME_STATE["ENTITY_SPRITES_DICT"][
moving_to_coords
]:
print("checking entity %s" % entity_obj["map_tile_name"])
# if the entity has a before_move behavior function
if "before_move" in TILES[entity_obj["map_tile_name"]].keys():
print(
"calling before_move %s, %s, %s"
% (
moving_to_coords,
GAME_STATE["PLAYER_LOC"],
entity_obj,
)
)
                        # call the before_move behavior function and act upon its result
if TILES[entity_obj["map_tile_name"]]["before_move"](
moving_to_coords,
GAME_STATE["PLAYER_LOC"],
entity_obj,
GAME_STATE,
):
                            # allow the movement if it returned true
can_move = True
else:
# break and don't allow movement if it returned false
break
else: # entity does not have a before_move function
# allow movement
can_move = True
if can_move:
# set the player loc variable to the new coords
GAME_STATE["PLAYER_LOC"] = moving_to_coords
else: # no entities at the location player is moving to
# set player loc variable to new coords
GAME_STATE["PLAYER_LOC"] = moving_to_coords
# reset movement offset variables
y_offset = 0
x_offset = 0
# set previous button values for next iteration
prev_up = cur_up
prev_down = cur_down
prev_right = cur_right
prev_left = cur_left
# current time
now = time.monotonic()
# if it has been long enough based on FPS delay
if now > last_update_time + FPS_DELAY:
# Set camera to 10x8 centered on the player
# Clamped to (0, MAP_WIDTH) and (0, MAP_HEIGHT)
set_camera_view(
max(
min(
GAME_STATE["PLAYER_LOC"][0] - 4,
GAME_STATE["MAP_WIDTH"] - SCREEN_WIDTH_TILES,
),
0,
),
max(
min(
GAME_STATE["PLAYER_LOC"][1] - 3,
GAME_STATE["MAP_HEIGHT"] - SCREEN_HEIGHT_TILES,
),
0,
),
10,
8,
)
# draw the camera
draw_camera_view()
# if player beat this map
if GAME_STATE["STATE"] == STATE_MAPWIN:
GAME_STATE["MAP_INDEX"] += 1
# if player has beaten all maps
if GAME_STATE["MAP_INDEX"] >= len(MAPS):
GAME_STATE["MAP_INDEX"] = 0
GAME_STATE["STATE"] = STATE_WAITING
load_map(MAPS[GAME_STATE["MAP_INDEX"]])
show_splash(
"You Win \n =D \nCongratulations. \nStart Over?", 0x29C1CF
)
else:
# prompt to start next
GAME_STATE["STATE"] = STATE_WAITING
load_map(MAPS[GAME_STATE["MAP_INDEX"]])
show_splash(
"You beat this level\n =D \nCongratulations. \nStart Next?",
0x29C1CF,
)
# game over from sparky
elif GAME_STATE["STATE"] == STATE_LOST_SPARKY:
GAME_STATE["MAP_INDEX"] = 0
GAME_STATE["STATE"] = STATE_WAITING
game_over_text = (
"Be careful not to \ntouch Sparky unless \n"
"you've collected \nenough Mho's.\nStarting Over"
)
load_map(MAPS[GAME_STATE["MAP_INDEX"]])
show_splash(game_over_text, 0x25AFBB)
# talking to minerva
elif GAME_STATE["STATE"] == STATE_MINERVA:
GAME_STATE["STATE"] = STATE_WAITING
random_fact = random.choice(FACTS)
minerva_txt = wrap_nicely("Minerva: {}".format(random_fact), 23)
show_splash(minerva_txt, 0xD39AE5, 0)
# store the last update time
last_update_time = now
```
|
{
"source": "jepler/circup",
"score": 2
}
|
#### File: jepler/circup/circup.py
```python
import ctypes
import glob
import json
import logging
import os
from pathlib import Path
import re
import shutil
from subprocess import check_output
import sys
import zipfile
import appdirs
import click
import requests
from semver import VersionInfo
# Useful constants.
#: The unique USB vendor ID for Adafruit boards.
VENDOR_ID = 9114
#: Flag to indicate if the command is being run in verbose mode.
VERBOSE = False
#: The location of data files used by circup (following OS conventions).
DATA_DIR = appdirs.user_data_dir(appname="circup", appauthor="adafruit")
#: The path to the JSON file containing the metadata about the current bundle.
BUNDLE_DATA = os.path.join(DATA_DIR, "circup.json")
#: The path to the zip file containing the current library bundle.
BUNDLE_ZIP = os.path.join(DATA_DIR, "adafruit-circuitpython-bundle-{}.zip")
#: The path to the directory into which the current bundle is unzipped.
BUNDLE_DIR = os.path.join(DATA_DIR, "adafruit_circuitpython_bundle_{}")
#: The directory containing the utility's log file.
LOG_DIR = appdirs.user_log_dir(appname="circup", appauthor="adafruit")
#: The location of the log file for the utility.
LOGFILE = os.path.join(LOG_DIR, "circup.log")
#: The libraries (and blank lines) which don't go on devices
NOT_MCU_LIBRARIES = [
"",
"adafruit-blinka",
"adafruit-blinka-bleio",
"adafruit-blinka-displayio",
"pyserial",
]
#: The version of CircuitPython found on the connected device.
CPY_VERSION = ""
#: The latest version of the CircuitPython Bundle from github.
LATEST_BUNDLE_VERSION = ""
# Ensure DATA_DIR / LOG_DIR related directories and files exist.
if not os.path.exists(DATA_DIR): # pragma: no cover
os.makedirs(DATA_DIR)
if not os.path.exists(LOG_DIR): # pragma: no cover
os.makedirs(LOG_DIR)
# Setup logging.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logfile_handler = logging.FileHandler(LOGFILE)
log_formatter = logging.Formatter(
"%(asctime)s %(levelname)s: %(message)s", datefmt="%m/%d/%Y %H:%M:%S"
)
logfile_handler.setFormatter(log_formatter)
logger.addHandler(logfile_handler)
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/circup.git"
class Module:
"""
Represents a CircuitPython module.
"""
# pylint: disable=too-many-arguments
def __init__(self, path, repo, device_version, bundle_version, mpy):
"""
The ``self.file`` and ``self.name`` attributes are constructed from
the ``path`` value. If the path is to a directory based module, the
resulting self.file value will be None, and the name will be the
basename of the directory path.
:param str path: The path to the module on the connected
CIRCUITPYTHON device.
:param str repo: The URL of the Git repository for this module.
:param str device_version: The semver value for the version on device.
:param str bundle_version: The semver value for the version in bundle.
:param bool mpy: Flag to indicate if the module is byte-code compiled.
"""
self.path = path
if os.path.isfile(self.path):
# Single file module.
self.file = os.path.basename(path)
self.name = self.file.replace(".py", "").replace(".mpy", "")
else:
# Directory based module.
self.file = None
self.name = os.path.basename(os.path.dirname(self.path))
self.repo = repo
self.device_version = device_version
self.bundle_version = bundle_version
self.mpy = mpy
# Figure out the bundle path.
self.bundle_path = None
if self.mpy:
# Byte compiled, now check CircuitPython version.
major_version = CPY_VERSION.split(".")[0]
bundle_platform = "{}mpy".format(major_version)
else:
# Regular Python
bundle_platform = "py"
for search_path, _, _ in os.walk(BUNDLE_DIR.format(bundle_platform)):
if os.path.basename(search_path) == "lib":
if self.file:
self.bundle_path = os.path.join(search_path, self.file)
else:
self.bundle_path = os.path.join(search_path, self.name)
logger.info(self)
# pylint: enable=too-many-arguments
@property
def outofdate(self):
"""
Returns a boolean to indicate if this module is out of date.
:return: Truthy indication if the module is out of date.
"""
if self.device_version and self.bundle_version:
try:
return VersionInfo.parse(self.device_version) < VersionInfo.parse(
self.bundle_version
)
except ValueError as ex:
logger.warning("Module '%s' has incorrect semver value.", self.name)
logger.warning(ex)
return True # Assume out of date to try to update.
@property
def major_update(self):
"""
Returns a boolean to indicate if this is a major version update.
:return: Boolean indicating if this is a major version upgrade
"""
try:
if (
VersionInfo.parse(self.device_version).major
== VersionInfo.parse(self.bundle_version).major
):
return False
except (TypeError, ValueError) as ex:
logger.warning("Module '%s' has incorrect semver value.", self.name)
logger.warning(ex)
        return True  # Assume Major Version update.
@property
def row(self):
"""
Returns a tuple of items to display in a table row to show the module's
name, local version and remote version.
:return: A tuple containing the module's name, version on the connected
device and version in the latest bundle.
"""
loc = self.device_version if self.device_version else "unknown"
rem = self.bundle_version if self.bundle_version else "unknown"
major_update = str(self.major_update)
return (self.name, loc, rem, major_update)
def update(self):
"""
Delete the module on the device, then copy the module from the bundle
back onto the device.
The caller is expected to handle any exceptions raised.
"""
if os.path.isdir(self.path):
# Delete and copy the directory.
shutil.rmtree(self.path, ignore_errors=True)
shutil.copytree(self.bundle_path, self.path)
else:
# Delete and copy file.
os.remove(self.path)
shutil.copyfile(self.bundle_path, self.path)
def __repr__(self):
"""
Helps with log files.
:return: A repr of a dictionary containing the module's metadata.
"""
return repr(
{
"path": self.path,
"file": self.file,
"name": self.name,
"repo": self.repo,
"device_version": self.device_version,
"bundle_version": self.bundle_version,
"bundle_path": self.bundle_path,
"mpy": self.mpy,
}
)
def clean_library_name(assumed_library_name):
"""
    Most CP repos and library names look like this:
repo: Adafruit_CircuitPython_LC709203F
library: adafruit_lc709203f
But some do not and this handles cleaning that up.
Also cleans up if the pypi or reponame is passed in instead of the
CP library name.
:param str assumed_library_name: An assumed name of a library from user
or requirements.txt entry
:return: str proper library name
"""
not_standard_names = {
# Assumed Name : Actual Name
"adafruit_adafruitio": "adafruit_io",
"adafruit_busdevice": "adafruit_bus_device",
"adafruit_neopixel": "neopixel",
"adafruit_sd": "adafruit_sdcard",
"adafruit_simpleio": "simpleio",
}
if "circuitpython" in assumed_library_name:
# convert repo or pypi name to common library name
assumed_library_name = (
assumed_library_name.replace("-circuitpython-", "_")
.replace("_circuitpython_", "_")
.replace("-", "_")
)
if assumed_library_name in not_standard_names.keys():
return not_standard_names[assumed_library_name]
return assumed_library_name
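# Illustrative calls (expected behaviour, added for clarity):
#     clean_library_name("adafruit-circuitpython-lc709203f")  # -> "adafruit_lc709203f"
#     clean_library_name("adafruit_neopixel")                 # -> "neopixel"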
def ensure_latest_bundle():
"""
Ensure that there's a copy of the latest library bundle available so circup
can check the metadata contained therein.
"""
logger.info("Checking for library updates.")
tag = get_latest_tag()
old_tag = "0"
if os.path.isfile(BUNDLE_DATA):
with open(BUNDLE_DATA, encoding="utf-8") as data:
try:
old_tag = json.load(data)["tag"]
except json.decoder.JSONDecodeError as ex:
# Sometimes (why?) the JSON file becomes corrupt. In which case
# log it and carry on as if setting up for first time.
logger.error("Could not parse %s", BUNDLE_DATA)
logger.exception(ex)
if tag > old_tag:
logger.info("New version available (%s).", tag)
try:
get_bundle(tag)
with open(BUNDLE_DATA, "w", encoding="utf-8") as data:
json.dump({"tag": tag}, data)
except requests.exceptions.HTTPError as ex:
            # See #20 for the reason for this
click.secho(
(
"There was a problem downloading the bundle. "
"Please try again in a moment."
),
fg="red",
)
logger.exception(ex)
sys.exit(1)
else:
logger.info("Current library bundle up to date %s.", tag)
def extract_metadata(path):
"""
Given an file path, return a dictionary containing metadata extracted from
dunder attributes found therein. Works with both .py and .mpy files.
For Python source files, such metadata assignments should be simple and
single-line. For example::
__version__ = "1.1.4"
__repo__ = "https://github.com/adafruit/SomeLibrary.git"
For byte compiled .mpy files, a brute force / backtrack approach is used
to find the __version__ number in the file -- see comments in the
code for the implementation details.
:param str path: The path to the file containing the metadata.
:return: The dunder based metadata found in the file, as a dictionary.
"""
result = {}
logger.info("%s", path)
if path.endswith(".py"):
result["mpy"] = False
with open(path, encoding="utf-8") as source_file:
content = source_file.read()
#: The regex used to extract ``__version__`` and ``__repo__`` assignments.
dunder_key_val = r"""(__\w+__)\s*=\s*(?:['"]|\(\s)(.+)['"]"""
for match in re.findall(dunder_key_val, content):
result[match[0]] = str(match[1])
if result:
logger.info("Extracted metadata: %s", result)
return result
if path.endswith(".mpy"):
result["mpy"] = True
with open(path, "rb") as mpy_file:
content = mpy_file.read()
# Find the start location of the "__version__" (prepended with byte
# value of 11 to indicate length of "__version__").
loc = content.find(b"\x0b__version__")
if loc > -1:
# Backtrack until a byte value of the offset is reached.
offset = 1
while offset < loc:
val = int(content[loc - offset])
if val == offset - 1: # Off by one..!
# Found version, extract the number given boundaries.
start = loc - offset + 1 # No need for prepended length.
end = loc # Up to the start of the __version__.
version = content[start:end] # Slice the version number.
# Create a string version as metadata in the result.
result = {"__version__": version.decode("utf-8"), "mpy": True}
break # Nothing more to do.
offset += 1 # ...and again but backtrack by one.
return result
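# For a source file containing the two assignments shown in the docstring above,
# extract_metadata() is expected to return something like:
#     {"mpy": False, "__version__": "1.1.4",
#      "__repo__": "https://github.com/adafruit/SomeLibrary.git"}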
def find_device():
"""
Return the location on the filesystem for the connected Adafruit device.
This is based upon how Mu discovers this information.
:return: The path to the device on the local filesystem.
"""
device_dir = None
# Attempt to find the path on the filesystem that represents the plugged in
# CIRCUITPY board.
if os.name == "posix":
# Linux / OSX
for mount_command in ["mount", "/sbin/mount"]:
try:
mount_output = check_output(mount_command).splitlines()
mounted_volumes = [x.split()[2] for x in mount_output]
for volume in mounted_volumes:
if volume.endswith(b"CIRCUITPY"):
device_dir = volume.decode("utf-8")
except FileNotFoundError:
continue
elif os.name == "nt":
# Windows
def get_volume_name(disk_name):
"""
Each disk or external device connected to windows has an attribute
called "volume name". This function returns the volume name for the
given disk/device.
Based upon answer given here: http://stackoverflow.com/a/12056414
"""
vol_name_buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.kernel32.GetVolumeInformationW(
ctypes.c_wchar_p(disk_name),
vol_name_buf,
ctypes.sizeof(vol_name_buf),
None,
None,
None,
None,
0,
)
return vol_name_buf.value
#
# In certain circumstances, volumes are allocated to USB
# storage devices which cause a Windows popup to raise if their
# volume contains no media. Wrapping the check in SetErrorMode
# with SEM_FAILCRITICALERRORS (1) prevents this popup.
#
old_mode = ctypes.windll.kernel32.SetErrorMode(1)
try:
for disk in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
path = "{}:\\".format(disk)
if os.path.exists(path) and get_volume_name(path) == "CIRCUITPY":
device_dir = path
# Report only the FIRST device found.
break
finally:
ctypes.windll.kernel32.SetErrorMode(old_mode)
else:
# No support for unknown operating systems.
raise NotImplementedError('OS "{}" not supported.'.format(os.name))
logger.info("Found device: %s", device_dir)
return device_dir
def find_modules(device_path):
"""
Extracts metadata from the connected device and available bundle and
returns this as a list of Module instances representing the modules on the
device.
:return: A list of Module instances describing the current state of the
modules on the connected device.
"""
# pylint: disable=broad-except
try:
device_modules = get_device_versions(device_path)
bundle_modules = get_bundle_versions()
result = []
for name, device_metadata in device_modules.items():
if name in bundle_modules:
bundle_metadata = bundle_modules[name]
path = device_metadata["path"]
repo = bundle_metadata.get("__repo__")
device_version = device_metadata.get("__version__")
bundle_version = bundle_metadata.get("__version__")
mpy = device_metadata["mpy"]
result.append(Module(path, repo, device_version, bundle_version, mpy))
return result
except Exception as ex:
# If it's not possible to get the device and bundle metadata, bail out
# with a friendly message and indication of what's gone wrong.
logger.exception(ex)
click.echo("There was a problem: {}".format(ex))
sys.exit(1)
# pylint: enable=broad-except
def get_bundle(tag):
"""
Downloads and extracts the version of the bundle with the referenced tag.
:param str tag: The GIT tag to use to download the bundle.
:return: The location of the resulting zip file in a temporary location on
the local filesystem.
"""
urls = {
"py": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle"
"/releases/download"
"/{tag}/adafruit-circuitpython-bundle-py-{tag}.zip".format(tag=tag)
),
"6mpy": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/"
"releases/download"
"/{tag}/adafruit-circuitpython-bundle-6.x-mpy-{tag}.zip".format(tag=tag)
),
"7mpy": (
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/"
"releases/download"
"/{tag}/adafruit-circuitpython-bundle-7.x-mpy-{tag}.zip".format(tag=tag)
),
}
click.echo("Downloading latest version information.\n")
for platform, url in urls.items():
logger.info("Downloading bundle: %s", url)
r = requests.get(url, stream=True)
# pylint: disable=no-member
if r.status_code != requests.codes.ok:
logger.warning("Unable to connect to %s", url)
r.raise_for_status()
# pylint: enable=no-member
total_size = int(r.headers.get("Content-Length"))
temp_zip = BUNDLE_ZIP.format(platform)
with click.progressbar(r.iter_content(1024), length=total_size) as pbar, open(
temp_zip, "wb"
) as f:
for chunk in pbar:
f.write(chunk)
pbar.update(len(chunk))
logger.info("Saved to %s", temp_zip)
temp_dir = BUNDLE_DIR.format(platform)
if os.path.isdir(temp_dir):
shutil.rmtree(temp_dir)
with zipfile.ZipFile(temp_zip, "r") as zfile:
zfile.extractall(temp_dir)
click.echo("\nOK\n")
def get_bundle_versions():
"""
Returns a dictionary of metadata from modules in the latest known release
of the library bundle. Uses the Python version (rather than the compiled
version) of the library modules.
:return: A dictionary of metadata about the modules available in the
library bundle.
"""
ensure_latest_bundle()
path = None
for path, _, _ in os.walk(BUNDLE_DIR.format("py")):
if os.path.basename(path) == "lib":
break
return get_modules(path)
def get_circuitpython_version(device_path):
"""
Returns the version number of CircuitPython running on the board connected
via ``device_path``. This is obtained from the ``boot_out.txt`` file on the
device, whose content will start with something like this::
Adafruit CircuitPython 4.1.0 on 2019-08-02;
:param str device_path: The path to the connected board.
:return: The version string for CircuitPython running on the connected
board.
"""
with open(os.path.join(device_path, "boot_out.txt")) as boot:
circuit_python, _ = boot.read().split(";")
return circuit_python.split(" ")[-3]
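# Worked example for the boot_out.txt line quoted in the docstring:
#     "Adafruit CircuitPython 4.1.0 on 2019-08-02".split(" ")[-3]  # -> "4.1.0"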
def get_dependencies(*requested_libraries, mod_names, to_install=()):
"""
Return a list of other CircuitPython libraries
:param tuple requested_libraries: The libraries to search for dependencies
:param object mod_names: All the modules metadata from bundle
:return: tuple of module names to install which we build
"""
# Internal variables
_to_install = to_install
_requested_libraries = []
_rl = requested_libraries[0]
if not requested_libraries[0]:
# If nothing is requested, we're done
return _to_install
for l in _rl:
# Convert tuple to list and force all to lowercase, Clean the names
l = clean_library_name(l.lower())
if l in NOT_MCU_LIBRARIES:
logger.info("Skipping %s. It is not for microcontroller installs.", l)
else:
try:
# Don't process any names we can't find in mod_names
mod_names[l] # pylint: disable=pointless-statement
_requested_libraries.append(l)
except KeyError:
click.secho(
f"WARNING:\n\t{l} is not a known CircuitPython library.",
fg="yellow",
)
if not _requested_libraries:
# If nothing is requested, we're done
return _to_install
for library in _requested_libraries:
if library not in _to_install:
_to_install = _to_install + (library,)
# get the requirements.txt from bundle
requirements_txt = get_requirements(library)
if requirements_txt:
_requested_libraries.extend(
libraries_from_requirements(requirements_txt)
)
# we've processed this library, remove it from the list
_requested_libraries.remove(library)
return get_dependencies(
tuple(_requested_libraries),
mod_names=mod_names,
to_install=_to_install,
)
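# Illustrative call (assumed data; mod_names normally comes from get_bundle_versions()):
#     get_dependencies(("adafruit_io",), mod_names=mod_names)
#     # -> a tuple such as ("adafruit_io", "adafruit_minimqtt", ...) depending on the
#     #    requirements.txt files shipped in the bundle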
def get_device_versions(device_path):
"""
Returns a dictionary of metadata from modules on the connected device.
:return: A dictionary of metadata about the modules available on the
connected device.
"""
return get_modules(os.path.join(device_path, "lib"))
def get_latest_release_from_url(url):
"""
Find the tag name of the latest release by using HTTP HEAD and decoding the redirect.
:return: The most recent tag value for the release.
"""
logger.info("Requesting redirect information: %s", url)
response = requests.head(url)
responseurl = response.url
if response.is_redirect:
responseurl = response.headers["Location"]
tag = responseurl.rsplit("/", 1)[-1]
logger.info("Tag: '%s'", tag)
return tag
def get_latest_tag():
"""
Find the value of the latest tag for the Adafruit CircuitPython library
bundle.
:return: The most recent tag value for the project.
"""
global LATEST_BUNDLE_VERSION
if LATEST_BUNDLE_VERSION == "":
LATEST_BUNDLE_VERSION = get_latest_release_from_url(
"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest"
)
return LATEST_BUNDLE_VERSION
def get_modules(path):
"""
Get a dictionary containing metadata about all the Python modules found in
the referenced path.
:param str path: The directory in which to find modules.
:return: A dictionary containing metadata about the found modules.
"""
result = {}
if not path:
return result
single_file_py_mods = glob.glob(os.path.join(path, "*.py"))
single_file_mpy_mods = glob.glob(os.path.join(path, "*.mpy"))
directory_mods = [
d
for d in glob.glob(os.path.join(path, "*", ""))
if not os.path.basename(os.path.normpath(d)).startswith(".")
]
single_file_mods = single_file_py_mods + single_file_mpy_mods
for sfm in [f for f in single_file_mods if not os.path.basename(f).startswith(".")]:
metadata = extract_metadata(sfm)
metadata["path"] = sfm
result[os.path.basename(sfm).replace(".py", "").replace(".mpy", "")] = metadata
for dm in directory_mods:
name = os.path.basename(os.path.dirname(dm))
metadata = {}
py_files = glob.glob(os.path.join(dm, "*.py"))
mpy_files = glob.glob(os.path.join(dm, "*.mpy"))
all_files = py_files + mpy_files
for source in [f for f in all_files if not os.path.basename(f).startswith(".")]:
metadata = extract_metadata(source)
if "__version__" in metadata:
metadata["path"] = dm
result[name] = metadata
break
else:
# No version metadata found.
result[name] = {"path": dm, "mpy": bool(mpy_files)}
return result
def get_requirements(library_name):
"""
Return a string of the requirements.txt for a GitHub Repo
    NOTE: This only looks at the py bundle. No known differences in the mpy
bundle for requirements.txt
:param str library_name: CircuitPython library name
:return: str the content of requirements.txt or None if not found
"""
bundle_path = BUNDLE_DIR.format("py")
requirements_txt = (
"{}/adafruit-circuitpython-bundle-py-{}/requirements/{}/"
"requirements.txt".format(bundle_path, get_latest_tag(), library_name)
)
if Path(requirements_txt).is_file():
return open(requirements_txt).read()
return None
# pylint: disable=too-many-locals,too-many-branches
def install_module(device_path, name, py, mod_names): # pragma: no cover
"""
Finds a connected device and installs a given module name if it
is available in the current module bundle and is not already
installed on the device.
TODO: There is currently no check for the version.
:param str device_path: The path to the connected board.
:param str name: Name of module to install
:param bool py: Boolean to specify if the module should be installed from
source or from a pre-compiled module
:param mod_names: Dictionary of metadata from modules that can be generated
with get_bundle_versions()
"""
if not name:
click.echo("No module name(s) provided.")
elif name in mod_names:
library_path = os.path.join(device_path, "lib")
if not os.path.exists(library_path): # pragma: no cover
os.makedirs(library_path)
metadata = mod_names[name]
# Grab device modules to check if module already installed
device_modules = []
for module in find_modules(device_path):
device_modules.append(module.name)
if name in device_modules:
click.echo("'{}' is already installed.".format(name))
return
if py:
# Use Python source for module.
source_path = metadata["path"] # Path to Python source version.
if os.path.isdir(source_path):
target = os.path.basename(os.path.dirname(source_path))
target_path = os.path.join(library_path, target)
# Copy the directory.
shutil.copytree(source_path, target_path)
else:
target = os.path.basename(source_path)
target_path = os.path.join(library_path, target)
# Copy file.
shutil.copyfile(source_path, target_path)
else:
# Use pre-compiled mpy modules.
module_name = os.path.basename(metadata["path"]).replace(".py", ".mpy")
if not module_name:
# Must be a directory based module.
module_name = os.path.basename(os.path.dirname(metadata["path"]))
major_version = CPY_VERSION.split(".")[0]
bundle_platform = "{}mpy".format(major_version)
bundle_path = ""
for path, _, _ in os.walk(BUNDLE_DIR.format(bundle_platform)):
if os.path.basename(path) == "lib":
bundle_path = os.path.join(path, module_name)
if bundle_path:
if os.path.isdir(bundle_path):
target_path = os.path.join(library_path, module_name)
# Copy the directory.
shutil.copytree(bundle_path, target_path)
else:
target = os.path.basename(bundle_path)
target_path = os.path.join(library_path, target)
# Copy file.
shutil.copyfile(bundle_path, target_path)
else:
raise IOError("Cannot find compiled version of module.")
click.echo("Installed '{}'.".format(name))
else:
click.echo("Unknown module named, '{}'.".format(name))
# pylint: enable=too-many-locals,too-many-branches
def libraries_from_requirements(requirements):
"""
Clean up supplied requirements.txt and turn into tuple of CP libraries
:param str requirements: A string version of a requirements.txt
:return: tuple of library names
"""
libraries = ()
for line in requirements.split("\n"):
line = line.lower().strip()
if line.startswith("#") or line == "":
# skip comments
pass
else:
if any(operators in line for operators in [">", "<", "="]):
# Remove everything after any pip style version specifiers
line = re.split("[<|>|=|]", line)[0]
libraries = libraries + (line,)
return libraries
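# Illustrative call (expected behaviour):
#     libraries_from_requirements("adafruit_bme280>=2.0\n# a comment\nneopixel\n")
#     # -> ("adafruit_bme280", "neopixel")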
# ----------- CLI command definitions ----------- #
# The following functions have IO side effects (for instance they emit to
# stdout). Ergo, these are not checked with unit tests. Most of the
# functionality they provide is provided by the functions above, which *are*
# tested. Most of the logic of the following functions is to prepare things for
# presentation to / interaction with the user.
@click.group()
@click.option(
"--verbose", is_flag=True, help="Comprehensive logging is sent to stdout."
)
@click.option(
"--path",
type=click.Path(exists=True, file_okay=False),
help="Path to CircuitPython directory. Overrides automatic path detection.",
)
@click.version_option(
prog_name="CircUp",
message="%(prog)s, A CircuitPython module updater. Version %(version)s",
)
@click.pass_context
def main(ctx, verbose, path): # pragma: no cover
"""
A tool to manage and update libraries on a CircuitPython device.
"""
ctx.ensure_object(dict)
if verbose:
# Configure additional logging to stdout.
global VERBOSE
VERBOSE = True
verbose_handler = logging.StreamHandler(sys.stdout)
verbose_handler.setLevel(logging.INFO)
verbose_handler.setFormatter(log_formatter)
logger.addHandler(verbose_handler)
click.echo("Logging to {}\n".format(LOGFILE))
logger.info("### Started Circup ###")
if path:
device_path = path
else:
device_path = find_device()
ctx.obj["DEVICE_PATH"] = device_path
if device_path is None:
click.secho("Could not find a connected Adafruit device.", fg="red")
sys.exit(1)
global CPY_VERSION
CPY_VERSION = get_circuitpython_version(device_path)
click.echo(
"Found device at {}, running CircuitPython {}.".format(device_path, CPY_VERSION)
)
latest_version = get_latest_release_from_url(
"https://github.com/adafruit/circuitpython/releases/latest"
)
try:
if VersionInfo.parse(CPY_VERSION) < VersionInfo.parse(latest_version):
click.secho(
"A newer version of CircuitPython ({}) is available.".format(
latest_version
),
fg="green",
)
except ValueError as ex:
logger.warning("CircuitPython has incorrect semver value.")
logger.warning(ex)
@main.command()
@click.option("-r", "--requirement", is_flag=True)
@click.pass_context
def freeze(ctx, requirement): # pragma: no cover
"""
Output details of all the modules found on the connected CIRCUITPYTHON
device. Option -r saves output to requirements.txt file
"""
logger.info("Freeze")
modules = find_modules(ctx.obj["DEVICE_PATH"])
if modules:
output = []
for module in modules:
output.append("{}=={}".format(module.name, module.device_version))
for module in output:
click.echo(module)
logger.info(module)
if requirement:
cwd = os.path.abspath(os.getcwd())
for i, module in enumerate(output):
output[i] += "\n"
with open(cwd + "/" + "requirements.txt", "w", newline="\n") as file:
file.truncate(0)
file.writelines(output)
else:
click.echo("No modules found on the device.")
@main.command()
@click.pass_context
def list(ctx): # pragma: no cover
"""
Lists all out of date modules found on the connected CIRCUITPYTHON device.
"""
logger.info("List")
# Grab out of date modules.
data = [("Module", "Version", "Latest", "Major Update")]
modules = [m.row for m in find_modules(ctx.obj["DEVICE_PATH"]) if m.outofdate]
if modules:
data += modules
# Nice tabular display.
col_width = [0, 0, 0, 0]
for row in data:
for i, word in enumerate(row):
col_width[i] = max(len(word) + 2, col_width[i])
dashes = tuple(("-" * (width - 1) for width in col_width))
data.insert(1, dashes)
click.echo(
"The following modules are out of date or probably need an update.\n"
"Major Updates may include breaking changes. Review before updating.\n"
)
for row in data:
output = ""
for index, cell in enumerate(row):
output += cell.ljust(col_width[index])
if not VERBOSE:
click.echo(output)
logger.info(output)
else:
click.echo("All modules found on the device are up to date.")
@main.command()
@click.argument("modules", required=False, nargs=-1)
@click.option("--py", is_flag=True)
@click.option("-r", "--requirement")
@click.pass_context
def install(ctx, modules, py, requirement): # pragma: no cover
"""
Install a named module(s) onto the device. Multiple modules
can be installed at once by providing more than one module name, each
separated by a space.
Option -r allows specifying a text file to install all modules listed in
the text file.
TODO: Ensure there's enough space on the device, work out the version of
    CircuitPython on the device in order to copy the appropriate .mpy versions
too. ;-)
"""
available_modules = get_bundle_versions()
mod_names = {}
for module, metadata in available_modules.items():
mod_names[module.replace(".py", "").lower()] = metadata
if requirement:
cwd = os.path.abspath(os.getcwd())
requirements_txt = open(cwd + "/" + requirement, "r").read()
requested_installs = sorted(libraries_from_requirements(requirements_txt))
else:
requested_installs = sorted(modules)
click.echo(f"Searching for dependencies for: {requested_installs}")
to_install = get_dependencies(requested_installs, mod_names=mod_names)
if to_install is not None:
to_install = sorted(to_install)
click.echo(f"Ready to install: {to_install}\n")
for library in to_install:
install_module(ctx.obj["DEVICE_PATH"], library, py, mod_names)
@click.argument("match", required=False, nargs=1)
@main.command()
def show(match): # pragma: no cover
"""
Show a list of available modules in the bundle. These are modules which
*could* be installed on the device.
If MATCH is specified only matching modules will be listed.
"""
available_modules = get_bundle_versions()
module_names = sorted([m.replace(".py", "") for m in available_modules])
if match is not None:
module_names = [m for m in module_names if match in m]
click.echo("\n".join(module_names))
click.echo(
"{} shown of {} packages.".format(len(module_names), len(available_modules))
)
@main.command()
@click.argument("module", nargs=-1)
@click.pass_context
def uninstall(ctx, module): # pragma: no cover
"""
Uninstall a named module(s) from the connected device. Multiple modules
can be uninstalled at once by providing more than one module name, each
separated by a space.
"""
for name in module:
device_modules = get_device_versions(ctx.obj["DEVICE_PATH"])
name = name.lower()
mod_names = {}
for module_item, metadata in device_modules.items():
mod_names[module_item.replace(".py", "").lower()] = metadata
if name in mod_names:
library_path = os.path.join(ctx.obj["DEVICE_PATH"], "lib")
metadata = mod_names[name]
module_path = metadata["path"]
if os.path.isdir(module_path):
target = os.path.basename(os.path.dirname(module_path))
target_path = os.path.join(library_path, target)
# Remove the directory.
shutil.rmtree(target_path)
else:
target = os.path.basename(module_path)
target_path = os.path.join(library_path, target)
# Remove file
os.remove(target_path)
click.echo("Uninstalled '{}'.".format(name))
else:
click.echo("Module '{}' not found on device.".format(name))
@main.command(
short_help=(
"Update modules on the device. "
"Use --all to automatically update all modules without Major Version warnings."
)
)
@click.option(
"--all", is_flag=True, help="Update all modules without Major Version warnings."
)
@click.pass_context
def update(ctx, all): # pragma: no cover
"""
Checks for out-of-date modules on the connected CIRCUITPYTHON device, and
prompts the user to confirm updating such modules.
"""
logger.info("Update")
# Grab out of date modules.
modules = [m for m in find_modules(ctx.obj["DEVICE_PATH"]) if m.outofdate]
if modules:
click.echo("Found {} module[s] needing update.".format(len(modules)))
if not all:
click.echo("Please indicate which modules you wish to update:\n")
for module in modules:
update_flag = all
if VERBOSE:
click.echo(
"Device version: {}, Bundle version: {}".format(
module.device_version, module.bundle_version
)
)
if isinstance(module.bundle_version, str) and not VersionInfo.isvalid(
module.bundle_version
):
click.secho(
f"WARNING: Library {module.name} repo has incorrect __version__"
"\n\tmetadata. Circup will assume it needs updating."
"\n\tPlease file an issue in the library repo.",
fg="yellow",
)
if module.repo:
click.secho(f"\t{module.repo}", fg="yellow")
if not update_flag:
if module.major_update:
update_flag = click.confirm(
(
"'{}' is a Major Version update and may contain breaking "
"changes. Do you want to update?".format(module.name)
)
)
else:
update_flag = click.confirm("Update '{}'?".format(module.name))
if update_flag:
# pylint: disable=broad-except
try:
module.update()
click.echo("Updated {}".format(module.name))
except Exception as ex:
logger.exception(ex)
click.echo(
"Something went wrong, {} (check the logs)".format(str(ex))
)
# pylint: enable=broad-except
else:
click.echo("None of the modules found on the device need an update.")
# Allows execution via `python -m circup ...`
# pylint: disable=no-value-for-parameter
if __name__ == "__main__": # pragma: no cover
main()
```
#### File: tests/bad_module/my_module.py
```python
def hello():
"""A hello function"""
return "Hello, World!"
```
#### File: circup/tests/local_module.py
```python
__version__ = "1.2.3"
__repo__ = "https://github.com/adafruit/SomeLibrary.git"
def hello():
"""A hello function"""
return "Hello, World!"
```
|
{
"source": "jepler/live-wrapper",
"score": 3
}
|
#### File: live-wrapper/lwr/cdroot.py
```python
import os
import tempfile
class CDRoot:
    def __init__(self, path=None):
        if not path:
            self.path = tempfile.mkdtemp()
        else:
            self.path = path
            if not os.path.exists(path):
                os.makedirs(path)
def __getitem__(self, i):
return CDRoot(os.path.join(self.path, i))
def __str__(self):
return self.path
```
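A brief usage sketch (not from the source repo): indexing a `CDRoot` with a sub-directory name returns a new `CDRoot` and creates the directory on disk.

```python
# Sketch: assumes the CDRoot class above is in scope (lwr/cdroot.py).
import os
import tempfile

base = tempfile.mkdtemp()          # an existing scratch directory
root = CDRoot(base)
boot = root['boot']                # creates <base>/boot via os.makedirs
print(str(boot))                   # e.g. /tmp/tmpabc123/boot
assert os.path.isdir(str(boot))
```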
|
{
"source": "jepler/passport.py",
"score": 3
}
|
#### File: passport.py/passport/eddimage.py
```python
from passport.wozardry import Track, raise_if
import bitarray
import json
class EDDError(Exception): pass # base class
class EDDLengthError(EDDError): pass
class EDDSeekError(EDDError): pass
class EDDReader:
def __init__(self, iostream):
self.tracks = []
for i in range(137):
raw_bytes = iostream.read(16384)
raise_if(len(raw_bytes) != 16384, EDDLengthError, "Bad EDD file (did you image by quarter tracks?)")
bits = bitarray.bitarray(endian="big")
bits.frombytes(raw_bytes)
self.tracks.append(Track(bits, 131072))
def seek(self, track_num):
if type(track_num) != float:
track_num = float(track_num)
if track_num < 0.0 or \
track_num > 35.0 or \
track_num.as_integer_ratio()[1] not in (1,2,4):
raise EDDSeekError("Invalid track %s" % track_num)
trk_id = int(track_num * 4)
return self.tracks[trk_id]
def to_json(self):
j = {"edd":
{"info":
{"synchronized":False,
"write_protected":False,
"cleaned":False
},
"meta":{}
}
}
return json.dumps(j, indent=2)
```
#### File: passport/patchers/bbf9.py
```python
from passport.patchers import Patch, Patcher
from passport.util import *
class BBF9Patcher(Patcher):
"""patch nibble check seen in Sunburst disks 1988 and later
see write-up of 4am crack no. 1165 Muppet Slate
tested on
- Muppet Slate (1988)
- Memory Building Blocks (1989)
- Odd One Out (1989)
- Regrouping (1989)
- <NAME> (1989)
- Teddy and Iggy (1990)
- 1-2-3 Sequence Me (1991)
"""
def should_run(self, track_num):
return self.g.is_prodos
def run(self, logical_sectors, track_num):
buffy = concat_track(logical_sectors)
if -1 == find.wild(buffy,
b'\x8E\xC0'
b'\x18'
b'\xA5' + find.WILDCARD + \
b'\x69\x8C'
b'\x8D'): return []
offset = find.wild(buffy,
b'\xBD\x89\xC0')
if offset == -1: return []
return [Patch(track_num, offset // 256, offset % 256, b'\x18\x60', "bbf9")]
```
#### File: passport/patchers/sunburst.py
```python
from passport.patchers import Patch, Patcher
from passport.util import *
class SunburstPatcher(Patcher):
"""RWTS with track-based address and data prologue modifications
tested on
- Challenge Math
- Safari Search
- Ten Clues
- The Factory
- Trading Post
- Word Quest
"""
def should_run(self, track_num):
return self.g.is_rwts and (track_num == 0)
def run(self, logical_sectors, track_num):
if not find.at(0x40, logical_sectors[3], b'\xD0'): return []
if not find.at(0x9C, logical_sectors[3], b'\xF0'): return []
if not find.at(0x69, logical_sectors[4], bytes.fromhex(
"48"
"A5 2A"
"4A"
"A8"
"B9 29 BA"
"8D 6A B9"
"8D 84 BC"
"B9 34 BA"
"8D FC B8"
"8D 5D B8"
"C0 11"
"D0 03"
"A9 02"
"AC"
"A9 0E"
"8D C0 BF"
"68"
"69 00"
"48"
"AD 78 04"
"90 2B")): return []
if not find.at(0x69, logical_sectors[6], bytes.fromhex(
"4C B8 B6"
"EA"
"EA"
"EA")): return []
if not find.at(0x8C, logical_sectors[8], bytes.fromhex(
"69 BA")): return []
return [Patch(0, 3, 0x40, bytes.fromhex("F0")),
Patch(0, 3, 0x9C, bytes.fromhex("D0")),
Patch(0, 6, 0x69, bytes.fromhex("20 C3 BC 20 C3 BC")),
Patch(0, 8, 0x8C, bytes.fromhex("A0 B9")),
Patch(0, 4, 0xC0, bytes.fromhex("C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA"))]
```
#### File: passport/rwts/border.py
```python
from passport.rwts.dos33 import DOS33RWTS
class BorderRWTS(DOS33RWTS):
# TODO doesn't work yet, not sure why
def reset(self, logical_sectors):
DOS33RWTS.reset(self, logical_sectors)
self.address_prologue = (logical_sectors[9][0x16],
logical_sectors[9][0x1B],
logical_sectors[9][0x20])
self.address_epilogue = (logical_sectors[9][0x25],
logical_sectors[9][0x2A])
self.data_prologue = (logical_sectors[8][0xFD],
logical_sectors[9][0x02],
logical_sectors[9][0x02])
self.data_epilogue = (logical_sectors[9][0x0C],
logical_sectors[9][0x11])
```
|
{
"source": "jepler/pyrockout",
"score": 3
}
|
#### File: jepler/pyrockout/install.py
```python
import os
import shutil
import subprocess
def match(dest, content):
if not os.path.exists(dest): return False
with open(dest, "rb") as f: destcontent = f.read()
return content == destcontent
def copy(destdir, srcdir, name, *, destname=None):
if destname is None: destname = name
src = os.path.join(srcdir, name)
dest = os.path.join(destdir, destname)
with open(src, "rb") as f: content = f.read()
put(dest, content)
def put(dest, content):
if not match(dest, content):
with open(dest, "wb") as f: f.write(content)
SRCPATH = 'src'
DSTPATH = 'CIRCUITPY'
for dirpath, dirnames, filenames in os.walk('src', followlinks=True):
outpath = os.path.join(DSTPATH, os.path.relpath(dirpath, SRCPATH))
os.makedirs(outpath, exist_ok=True)
for f in filenames:
if f.startswith('.'): continue
copy(outpath, dirpath, f)
```
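A small sketch (not part of the repo) of the idempotent helpers above: `put` only rewrites a file when its contents changed, which keeps writes to the CIRCUITPY drive to a minimum.

```python
# Sketch: exercising match/put from the script above in a temporary directory.
import os
import tempfile

tmp = tempfile.mkdtemp()
dest = os.path.join(tmp, "code.py")
put(dest, b"print('hi')\n")            # file missing -> written
assert match(dest, b"print('hi')\n")   # contents now match
put(dest, b"print('hi')\n")            # identical contents -> no rewrite
put(dest, b"print('bye')\n")           # changed contents -> rewritten
```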
|
{
"source": "jepma/bare-python-package",
"score": 3
}
|
#### File: src/bare_python_package/main.py
```python
import sys
import os
import logging
import argparse
import requests
logging.basicConfig()
logger = logging.getLogger(__name__)
def main():
p = argparse.ArgumentParser(description="...")
p.add_argument("--url", default="https://api.spotify.com/v1/search?type=artist&q=limpbizkit")
args = p.parse_args()
logger.info(f"Running with args {args}")
process_url(args.url)
def process_url(url):
r = requests.get(url)
logger.debug(r.json())
logger.info("Done processing your URL")
if __name__ == "__main__":
main()
```
|
{
"source": "jepohle/Part-IA-Flood-Control-Lab-Group-153",
"score": 4
}
|
#### File: Part-IA-Flood-Control-Lab-Group-153/floodsystem/flood.py
```python
from floodsystem.utils import sorted_by_key
def stations_level_over_threshold(stations, tol):
"""A function that takes a list of station objects and a threshold value for relative water level and returns a list of station object which
have a flood level higher than the tolerance. It takes form stations_level_over_threshold(list of station objects, tolerance (float))."""
list = []
for station in stations:
rellevel = station.relative_water_level()
if rellevel == None:
continue
elif rellevel > tol:
if rellevel > 10:
continue
else:
list.append((station, station.relative_water_level()))
return sorted_by_key(list, 1, reverse=True)
def stations_highest_rel_level(stations, N):
"""Returns the N stations with the highest relative water level from a list of station objects (stations)."""
list = []
for station in stations:
rellevel = station.relative_water_level()
if rellevel == None:
continue
if rellevel > 10:
continue
else:
list.append((station, station.relative_water_level()))
listsorted = sorted_by_key(list, 1, reverse=True)
output = [x[0] for x in listsorted]
return output[:N]
```
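A hedged illustration using a hypothetical stub station (assumes the floodsystem package, including utils.sorted_by_key, is importable):

```python
from floodsystem.flood import stations_level_over_threshold, stations_highest_rel_level

class StubStation:
    """Hypothetical stand-in for a monitoring station."""
    def __init__(self, name, rel_level):
        self.name = name
        self._rel = rel_level
    def relative_water_level(self):
        return self._rel

stations = [StubStation("A", 1.5), StubStation("B", None), StubStation("C", 0.4)]
print([(s.name, lvl) for s, lvl in stations_level_over_threshold(stations, 0.8)])
# -> [('A', 1.5)]   (None levels and levels above 10 are skipped)
print([s.name for s in stations_highest_rel_level(stations, 2)])
# -> ['A', 'C']
```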
#### File: jepohle/Part-IA-Flood-Control-Lab-Group-153/Task1C.py
```python
from floodsystem.geo import stations_by_distance
from floodsystem.geo import stations_within_radius
from floodsystem.stationdata import build_station_list
from floodsystem.utils import sorted_by_key
def run():
stations = build_station_list()
list = stations_within_radius(stations, (52.2053, 0.1218), 10)
list.sort()
for item in list:
print(item.name)
if __name__ == "__main__":
print("*** Task 1C: CUED Part IA Flood Warning System ***")
run()
```
#### File: jepohle/Part-IA-Flood-Control-Lab-Group-153/Task1F.py
```python
from floodsystem.stationdata import build_station_list
from floodsystem.station import inconsistent_typical_range_stations
def run():
"""Prints a list of the names of stations with faulty range data."""
x = build_station_list()
inconsistentstations = inconsistent_typical_range_stations(x)
names = [station.name for station in inconsistentstations]
names.sort()
print(names)
if __name__ == "__main__":
print("*** Task 1F: CUED Part IA Flood Warning System ***")
run()
```
#### File: jepohle/Part-IA-Flood-Control-Lab-Group-153/test_analysis.py
```python
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
import numpy as np
from floodsystem.analysis import polyfit
import datetime
def test_polyfit():
x = build_station_list()
update_water_levels(x)
dates, levels = fetch_measure_levels(x[0].measure_id, datetime.timedelta(3))
test_levels = np.full(len(dates), 1).tolist()
p, d0 = polyfit(dates, test_levels, 3)
print(p)
assert round(p.c[0], 10) == 0
assert round(p.c[1], 10) == 0
assert round(p.c[2], 10) == 0
assert round(p.c[3], 10) == 1
```
#### File: jepohle/Part-IA-Flood-Control-Lab-Group-153/test_geo.py
```python
from distutils.command.build import build
from floodsystem.stationdata import build_station_list
from floodsystem.geo import rivers_by_station_number, stations_by_distance, stations_within_radius
from floodsystem.geo import stations_by_river
from floodsystem.geo import rivers_with_station
def test_stations_by_distance():
"""Tests the function of the sort by distance algorithm by checking for an increase in distance by checking the difference between the first and second entry
in the sorted list."""
x = build_station_list()
sorted = stations_by_distance(x, (51.5072, -0.1276))
sortedfirst = sorted[0]
sortedsecond = sorted[1]
diff = sortedfirst[1] - sortedsecond[1]
assert diff < 0
def test_rivers_with_stations():
"""Tests if function rivers_with_station in callable and returns the expected datatype"""
x = build_station_list()
rivers = rivers_with_station(x)
assert type(rivers) == set
def test_stations_by_river():
"""Tests if function stations_by_rivers in callable and returns the expected datatype"""
x = build_station_list()
stations = stations_by_river(x)
assert type(stations) == dict
def test_station_within_radius():
"""Test if funcion station_within_radius works correctly"""
stations = build_station_list()
stations_within_10k = stations_within_radius(stations, (52.2053, 0.1218) , 10)
id_list = []
for station in stations_within_10k:
id_list.append(station.name)
id_list.sort()
assert id_list == ['<NAME>', 'Cambridge Baits Bite', "Cambridge Byron's Pool",
'<NAME>', 'Comberton', 'Dernford', 'Girton',
'Haslingfield Burnt Mill', 'Lode', 'Oakington', 'Stapleford']
def test_rivers_by_station_number():
"""Test to see if rivers_by_station_number is callable and if it returns valid data"""
stations = build_station_list()
rivers_by_station = rivers_by_station_number(stations, 1)
test_river_name = rivers_by_station[0][0]
print("testing number of stations for " + test_river_name)
n = 0
for station in stations:
if(station.river == test_river_name):
n += 1
assert n == rivers_by_station[0][1]
```
|
{
"source": "JEPooleyOS/osdatahub-graphics",
"score": 3
}
|
#### File: osdatahub-graphics/roads/roads_walk.py
```python
from os import environ
import geopandas as gpd
import matplotlib.pyplot as plt
from osdatahub import Extent, FeaturesAPI
from shapely.affinity import translate
from shapely.geometry import LineString, MultiLineString
from shapely.ops import linemerge
# Get OS Data Hub API key
key = environ.get("OS_API_KEY")
# Define extent
extent = Extent.from_ons_code("E09000001")
# Define product
product = "zoomstack_roads_local"
# Query Features API
features_api = FeaturesAPI(key, product, extent)
local_roads = features_api.query(limit=1000000)
# Convert to GeoDataFrame
local_roads_gdf = gpd.GeoDataFrame.from_features(
local_roads['features'], crs=extent.crs)
local_roads_gdf.to_crs("EPSG:27700", inplace=True)
# Move buildings according to their area
def merge_lines(lines: list) -> LineString:
"""
Turn a list of contiguous LineStrings into one LineString
"""
multi_line = MultiLineString(lines)
return linemerge(multi_line)
def stack_lines(lines: gpd.GeoDataFrame) -> LineString:
"""
Transform all lines so that they are stacked end-to-end
"""
stacked_lines = []
end_x, end_y = 0, 0
for line in lines.itertuples():
# Extract geometry
geometry = line.geometry
# Transform lines to the end of the stack
start_x, start_y = geometry.coords[0]
shifted_line = translate(geometry,
xoff=-start_x + end_x,
yoff=-start_y + end_y)
# Update shift parameters
end_x, end_y = shifted_line.coords[-1]
# Update stacked_lines list
stacked_lines.append(shifted_line)
return merge_lines(stacked_lines)
# Iterate the line stacking with a random order each time
paths = []
for _ in range(500):
local_roads_gdf = local_roads_gdf.sample(frac=1)
path = stack_lines(local_roads_gdf)
paths.append(path)
# Create GeoSeries
gs = gpd.GeoSeries(paths)
# Plot
edgecolor = "#FFFFFF03"
background = "#222222"
fig, ax = plt.subplots(facecolor=background)
gs.plot(edgecolor=edgecolor, ax=ax)
plt.axis('off')
plt.show()
```
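A minimal sketch of stack_lines on synthetic data (assuming geopandas and shapely are installed and the functions above are in scope; no API key required):

```python
import geopandas as gpd
from shapely.geometry import LineString

lines = gpd.GeoDataFrame(geometry=[LineString([(0, 0), (1, 0)]),
                                   LineString([(5, 5), (5, 7)])])
path = stack_lines(lines)
print(path.length)  # 3.0 -- segments are translated end-to-end and merged into one LineString
```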
|
{
"source": "Jepp0103/System_Integration_Tasks",
"score": 3
}
|
#### File: Mandatory_1/python_servers/python_server_one.py
```python
import requests
import csv
def send():
while True:
print("Enter number")
number = input()
xml = f"<?xml version='1.0' encoding='utf-8'?><data>{number}</data>"
headers = {"Content-Type": "application/xml"} # set what your server accepts
response = requests.post(
"http://127.0.0.1:2222/receive-xml", data=xml, headers=headers
)
readcsv(response.text.splitlines())
def readcsv(csvdata):
reader = csv.reader(csvdata, delimiter=",")
for row in reader:
print(row)
send()
# run(host="127.0.0.1", port=1111, debug=False, reloader=True, server="paste")
```
#### File: python_integration/Lecture_2/server_one.py
```python
from bottle import run, get, post, request
import time
import requests
import threading
def timer():
while True:
result = requests.get("http://127.0.0.1:4444")
print(result.text)
time.sleep(1)
@get("/") # Can use same method name because of the decorator
def do():
return "X server 1"
@post("/letter")
def do():
letter = request.forms.get("letter")
if letter in ("a", "b", "c"):
return f"Yes I got the letter: {letter}"
else:
req = int(requests.get("http://127.0.0.1:4444").text)
print(f"Number: {req}")
req_multiply = req * 2
return f"Did not match a letter, hence a number: {str(req_multiply)}"
@ post("/signin")
def do():
# Getting the form data
name = request.forms.get("name")
email = request.forms.get("email")
return f"Hi {name}, your email is {email}"
# xtimer = threading.Thread(target=timer) # Targeting the method name timer.
# xtimer.start()
run(host="127.0.0.1", port=3333, debug=True, reloader=True)
```
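A hedged client-side sketch (assumes the bottle app above is running on 127.0.0.1:3333; the non-letter branch additionally requires the companion server on port 4444):

```python
import requests

print(requests.post("http://127.0.0.1:3333/letter", data={"letter": "a"}).text)
# -> Yes I got the letter: a
print(requests.post("http://127.0.0.1:3333/signin",
                    data={"name": "Ada", "email": "ada@example.com"}).text)
# -> Hi Ada, your email is ada@example.com
```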
#### File: python_integration/Lecture_3_5_company_nemid_fat_sms/nem_id_app.py
```python
from bottle import run, get, view, response
##############################
@get("/nemid")
@view("index_nem_id.html")
def do():
return
##############################
run(host="127.0.0.1", port=3333, debug=True, reloader=True, server="paste")
```
#### File: python_integration/Mandatory_2/xml_to_csv.py
```python
from bottle import request, run, post, response
import csv
import io
import xml.etree.ElementTree as ET
hostname = "127.0.0.1"
port = 3333
@post("/xml-to-csv")
def converXmlToCsv():
xml_data = request.body
tree = ET.parse(xml_data)
root = tree.getroot()
msg = root.text.strip()
print("message:", msg)
csv_data = ["message", msg]
result = convertToCsv(csv_data)
print("Converted CSV:", result)
response.content_type = "text/csv"
return result
def convertToCsv(input):
output = io.StringIO()
writer = csv.writer(output, delimiter=",")
writer.writerow(input)
return output.getvalue()
run(host=hostname, port=port, debug=True, reloader=True, server="paste")
```
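A hedged client sketch for the converter above (assumes the service is running locally on port 3333):

```python
import requests

xml = "<?xml version='1.0' encoding='utf-8'?><data>  42  </data>"
resp = requests.post("http://127.0.0.1:3333/xml-to-csv", data=xml,
                     headers={"Content-Type": "application/xml"})
print(repr(resp.text))  # 'message,42\r\n'
```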
|
{
"source": "jeppard/infotrainer",
"score": 3
}
|
#### File: jeppard/infotrainer/AllToDec.py
```python
def AllToDec_ui():
    i = int(input('Ausgangssystem (z.B. 8 für Oktal) '))
zahl = input('Ausgangszahl: ')
input('Dezimalzahl: ')
print(All2dec(zahl, i))
def All2dec(z, p):
s = ''
n = 0
for i in range(0, len(z)):
n += int(z[len(z) - (i + 1)], p) * (p ** i)
s += str(int(z[len(z) - (i + 1)], p)) + '*' + str(p ** i) + ' + '
s = s[:-3] + ' = ' + str(n)
return s
if __name__ == '__main__':
AllToDec_ui()
```
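A worked example of the conversion (digits are reported least-significant first):

```python
print(All2dec('17', 8))    # '7*1 + 1*8 = 15'
print(All2dec('ff', 16))   # '15*1 + 15*16 = 255'
```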
|
{
"source": "JeppeDruedahl/HANKPY",
"score": 2
}
|
#### File: JeppeDruedahl/HANKPY/solve.py
```python
import time
import ctypes as ct
from numba import njit, prange
import numpy as np
from scipy.sparse import spdiags, diags
from scipy.sparse.linalg import spsolve
from scipy import interpolate
from consav.misc import elapsed
# local
import modelfuncs
import income_process
eps_low = 1e-12
##############
# 1. generic #
##############
def derivatives(par,sol):
""" take numerical derivatives """
# a. differences
modelfuncs.diff(sol.v,sol.vaB,sol.vaF,axis=1,dxf=par.daaaf,dxb=par.daaab)
modelfuncs.diff(sol.v,sol.vbB,sol.vbF,axis=2,dxf=par.dbbbf,dxb=par.dbbbb)
# b. correct with boundary conditions
sol.vaB[:,0,:] = 0
sol.vaF[:,-1,:] = 1e-8
sol.vbB[:,:,0] = -999.9
sol.vbF[:,:,-1] = 1e-8
##################
# 2. preparation #
##################
def construct_switch(par,ast):
""" split markov transition matrix """
# i. generate diagonal vector and off-diagonal matrix
par.switch_diag, ast.switch_off, par.switch_off = income_process.split_markov_matrix(par,par.z_markov)
# ii. ensure sorted indices for UMFPACK
ast.switch_off.sort_indices()
def prep(par,sol,solmethod):
""" prepare sol and ast classes """
class ast: None
# a. construct switch matrix
construct_switch(par,ast)
# c. derivatives
shape = (par.Nz,par.Na,par.Nb)
sol.vbB = np.zeros(shape)
sol.vbF = np.zeros(shape)
sol.vaB = np.zeros(shape)
sol.vaF = np.zeros(shape)
sol.c_B = np.zeros(shape)
sol.c_F = np.zeros(shape)
sol.h_B = np.zeros(shape)
sol.h_F = np.zeros(shape)
sol.Hc_B = np.zeros(shape)
sol.Hc_F = np.zeros(shape)
sol.sbc_B = np.zeros(shape)
sol.sbc_F = np.zeros(shape)
sol.daBbB = np.zeros(shape)
sol.daBbF = np.zeros(shape)
sol.daFbB = np.zeros(shape)
sol.HdaFbB = np.zeros(shape)
sol.HdaBbF = np.zeros(shape)
sol.HdaBbB = np.zeros(shape)
sol.daBbF_adj = np.zeros(shape)
sol.daBbB_adj = np.zeros(shape)
# d. solution containers
sol.v = np.zeros((par.Nz,par.Na,par.Nb))
sol.c = np.zeros(shape)
sol.h = np.zeros(shape)
sol.d = np.zeros(shape)
sol.d_adj = np.zeros(shape)
sol.s = np.zeros(shape)
sol.g = np.zeros((par.Nz,par.Nab))
# e. diagonals
shape = (par.Nz,par.Nab)
sol.centdiag = np.zeros(shape)
sol.a_updiag = np.zeros(shape)
sol.b_updiag = np.zeros(shape)
sol.b_lowdiag = np.zeros(shape)
sol.a_lowdiag = np.zeros(shape)
# f. Q
sol.Qps = np.zeros((par.Nz,par.Nab+1),dtype=np.int32) # element per column
Nmax = par.Nab + 2*(par.Nab-1) + 2*(par.Nab-par.Nb)
shape = (par.Nz,Nmax)
sol.Qis = np.zeros(shape,dtype=np.int32) # indices
sol.Qxs = np.zeros(shape) # data
# pointers and pointers to pointers
ast.Qs = [None]*par.Nz
if solmethod == 'UMFPACK':
ast.p_Qps = [None]*par.Nz
ast.p_Qis = [None]*par.Nz
ast.p_Qxs = [None]*par.Nz
for iz in range(par.Nz):
# pointers
ast.p_Qps[iz] = np.ctypeslib.as_ctypes(sol.Qps[iz])
ast.p_Qis[iz] = np.ctypeslib.as_ctypes(sol.Qis[iz])
ast.p_Qxs[iz] = np.ctypeslib.as_ctypes(sol.Qxs[iz])
# pointers to pointers
ast.pp_Qps = (ct.POINTER(ct.c_long)*par.Nz)(*ast.p_Qps,)
ast.pp_Qis = (ct.POINTER(ct.c_long)*par.Nz)(*ast.p_Qis,)
ast.pp_Qxs = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_Qxs,)
# g. working memory
sol.v = np.zeros((par.Nz,par.Na,par.Nb))
sol.g = np.zeros((par.Nz,par.Nab))
ast.RHS_HJB = np.zeros(par.Nzab)
ast.RHS_KFE = np.zeros(par.Nzab)
ast.Wi = np.zeros((par.Nz,par.Nab),dtype=np.int32)
ast.W = np.zeros((par.Nz,5*par.Nab))
# list of pointers
ast.p_v = [None]*par.Nz # value function
ast.p_g = [None]*par.Nz # distribution
ast.p_RHS_HJB = [None]*par.Nz # RHS in HJB eq. sys
ast.p_RHS_KFE = [None]*par.Nz # RHS in KF eq. sys
ast.p_Wi = [None]*par.Nz # working memory for UMFPACK
ast.p_W = [None]*par.Nz # working memory for UMFPACK
for iz,i0,i1 in [(iz,iz*par.Nab,(iz+1)*par.Nab) for iz in range(par.Nz)]:
ast.p_RHS_HJB[iz] = np.ctypeslib.as_ctypes(ast.RHS_HJB[i0:i1])
ast.p_RHS_KFE[iz] = np.ctypeslib.as_ctypes(ast.RHS_KFE[i0:i1])
ast.p_v[iz] = np.ctypeslib.as_ctypes(sol.v[iz].ravel())
ast.p_g[iz] = np.ctypeslib.as_ctypes(sol.g[iz].ravel())
ast.p_Wi[iz] = np.ctypeslib.as_ctypes(ast.Wi[iz])
ast.p_W[iz] = np.ctypeslib.as_ctypes(ast.W[iz])
# pointers to pointers
ast.pp_RHS_HJB = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_RHS_HJB,)
ast.pp_RHS_KFE = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_RHS_KFE,)
ast.pp_v = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_v,)
ast.pp_g = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_g,)
ast.pp_Wi = (ct.POINTER(ct.c_long)*par.Nz)(*ast.p_Wi,)
ast.pp_W = (ct.POINTER(ct.c_double)*par.Nz)(*ast.p_W,)
# precomputed symbolic matrices in UMFPACK
ast.pp_symbolics = (ct.c_void_p*par.Nz)(*[None for _ in range(par.Nz)])
return ast
################
# 3. solve HJB #
################
@njit(parallel=True,fastmath=True)
def upwind(par,sol):
""" apply upwind scheme """
# unpack
s = sol.s
h = sol.h
c = sol.c
d = sol.d
d_adj = sol.d_adj
h_B = sol.h_B
h_F = sol.h_F
c_B = sol.c_B
c_F = sol.c_F
Hc_B = sol.Hc_B
Hc_F = sol.Hc_F
sbc_B = sol.sbc_B
sbc_F = sol.sbc_F
daBbB = sol.daBbB
daFbB = sol.daFbB
daBbF = sol.daBbF
HdaFbB = sol.HdaFbB
HdaBbF = sol.HdaBbF
HdaBbB = sol.HdaBbB
daBbF_adj = sol.daBbF_adj
daBbB_adj = sol.daBbB_adj
# loop in parallel
for iz in prange(par.Nz):
for ia in range(par.Na):
for ib in range(par.Nb):
a = par.grid_a[ia]
b = par.grid_b[ib]
z = par.grid_z[iz]
index = (iz,ia,ib)
# a. consumption and liquid savings from foc
c_F[index],h_F[index],sbc_F[index],Hc_F[index] = modelfuncs.optimal_consumption(par,sol.vbF[index],z,b,par.Rb[index],par.w) # forwards
c_B[index],h_B[index],sbc_B[index],Hc_B[index] = modelfuncs.optimal_consumption(par,sol.vbB[index],z,b,par.Rb[index],par.w) # backwards
c_0,h_0,sbc_0,Hc_0 = modelfuncs.optimal_consumption(par,-999.9,z,b,par.Rb[index],par.w) # stationary
if ib == par.Nb-1:
sbc_F[index] = 0
Hc_F[index] = -1e12
# i. conditions
validF = sbc_F[index] > 0
validB = sbc_B[index] < 0
# ii. consumption and liquid savings decision
if validF and (~validB or Hc_F[index] >= Hc_B[index]) and Hc_F[index] >= Hc_0: # forward
c[index] = c_F[index]
h[index] = h_F[index]
s[index] = sbc_F[index]
if validB and (~validF or Hc_B[index] >= Hc_F[index]) and Hc_B[index] >= Hc_0: # backwards
c[index] = c_B[index]
h[index] = h_B[index]
s[index] = sbc_B[index]
if ~validF and ~validB: # stationary
c[index] = c_0
s[index] = sbc_0
h[index] = h_0
# b. deposits from foc's
daFbB[index] = modelfuncs.transaction_cost_foc(sol.vaF[index],sol.vbB[index],a,par) # a forward, b backward
daBbF[index] = modelfuncs.transaction_cost_foc(sol.vaB[index],sol.vbF[index],a,par) # a backward, b forward
daBbB[index] = modelfuncs.transaction_cost_foc(sol.vaB[index],sol.vbB[index],a,par) # a backward, b forward
HdaFbB[index] = sol.vaF[index]*daFbB[index] - sol.vbB[index]*(daFbB[index] + modelfuncs.transaction_cost(daFbB[index],a,par))
daBbF_adj[index] = daBbF[index] + modelfuncs.transaction_cost(daBbF[index],a,par)
HdaBbF[index] = sol.vaB[index]*daBbF[index] - sol.vbF[index]*daBbF_adj[index]
daBbB_adj[index] = daBbB[index] + modelfuncs.transaction_cost(daBbB[index],a,par)
HdaBbB[index] = sol.vaB[index]*daBbB[index] - sol.vbB[index]*daBbB_adj[index]
# i. correct boundaries
if ia == 0:
HdaBbF[index] = -1e12
HdaBbB[index] = -1e12
if ia == par.Na-1: HdaFbB[index] = -1e12
if ib == 0: HdaFbB[index] = -1e12
# ii. conditions
validFB = daFbB[index] > 0 and HdaFbB[index] > 0
validBF = daBbF_adj[index] <= 0 and HdaBbF[index] > 0
validBB = daBbB_adj[index] > 0 and daBbB[index] <= 0 and HdaBbB[index] > 0
# c. find d
if validFB and (~validBF or HdaFbB[index]>=HdaBbF[index]) and (~validBB or HdaFbB[index]>=HdaBbB[index]): d[index] = daFbB[index]
if validBF and (~validFB or HdaBbF[index]>=HdaFbB[index]) and (~validBB or HdaBbF[index]>=HdaBbB[index]): d[index] = daBbF[index]
if validBB and (~validFB or HdaBbB[index]>=HdaFbB[index]) and (~validBF or HdaBbB[index]>=HdaBbF[index]): d[index] = daBbB[index]
if (~validFB and ~validBF and ~validBB): d[index] = 0
# d. find d_adj
d_adj[index] = d[index] + modelfuncs.transaction_cost(d[index],a,par)
def create_RHS_HJB(par,sol,ast,v_prev):
""" create RHS of HJB """
# a. utility
u = modelfuncs.util(par,sol.c,sol.h)
u = u.ravel()
# d. total value
v = v_prev.ravel()
ast.RHS_HJB[:] = par.DeltaHJB*u + v + par.DeltaHJB*ast.switch_off@v
@njit(parallel=True,fastmath=True)
def create_diags_HJB(par,sol):
""" create diagonals """
# unpack
centdiag = sol.centdiag
a_lowdiag = sol.a_lowdiag
a_updiag = sol.a_updiag
b_lowdiag = sol.b_lowdiag
b_updiag = sol.b_updiag
# generate ltau0
ltau0 = (par.ra+par.eta)*(par.a_max*0.999)**(1-par.ltau)
# parallel loop
for iz in prange(par.Nz):
for ia in range(par.Na):
for ib in range(par.Nb):
index = (iz,ia,ib)
# a. set mechanical drift in a
a = par.grid_a[ia]
adrift = (par.ra + par.eta)*a - ltau0*a**par.ltau + par.xi*par.w
# b. find diagonals in a and b space
a_up = np.fmax(sol.d[index],0) + np.fmax(adrift,0)
a_up /= par.daaaf[index]
a_low = -np.fmin(sol.d[index],0) - np.fmin(adrift,0)
a_low /= par.daaab[index]
b_up = np.fmax(-sol.d_adj[index],0) + np.fmax(sol.s[index],0)
b_up /= par.dbbbf[index]
b_low = -np.fmin(-sol.d_adj[index],0) - np.fmin(sol.s[index],0)
b_low /= par.dbbbb[index]
# c. update
i = ia*par.Nb + ib
a_centdiag = a_low + a_up
b_centdiag = b_low + b_up
centdiag[iz,i] = 1 + par.DeltaHJB*(a_centdiag + b_centdiag + par.rho + par.eta - par.switch_diag[iz])
if ia < par.Na-1: a_updiag[iz,i+par.Nb] = -par.DeltaHJB*a_up
if ia > 0: a_lowdiag[iz,i-par.Nb] = -par.DeltaHJB*a_low
if ib < par.Nb-1: b_updiag[iz,i+1] = -par.DeltaHJB*b_up
if ib > 0: b_lowdiag[iz,i-1] = -par.DeltaHJB*b_low
def create_Q(par,sol,ast,solmethod):
""" create Q matrix """
if solmethod == 'scipy':
create_Q_scipy(par,sol,ast,solmethod)
elif solmethod == 'UMFPACK':
create_Q_UMFPACK(par,sol)
# equivalent:
# create_Q_scipy(par,sol,ast,solmethod)
# sol.Qps[:] = 0
# sol.Qis[:] = 0
# sol.Qxs[:] = 0
# for iz in range(par.Nz):
# Qz = ast.Qs[iz]
# N = Qz.data.size
# sol.Qps[iz,:] = Qz.indptr
# sol.Qis[iz,:N] = Qz.indices
# sol.Qxs[iz,:N] = Qz.data
else:
        raise Exception('unknown solution method')
def create_Q_scipy(par,sol,ast,solmethod):
""" create Q for use with scipy """
def remove_small(x):
I = np.abs(x) < eps_low
y = x.copy()
y[I] = 0
return y
for iz in range(par.Nz):
        # order of diagonals is important to get sorted indices
ast.Qs[iz] = diags( diagonals=[
remove_small(sol.a_updiag[iz,par.Nb:]),
remove_small(sol.b_updiag[iz,1:]),
remove_small(sol.centdiag[iz,:]),
remove_small(sol.b_lowdiag[iz,:-1]),
remove_small(sol.a_lowdiag[iz,:-par.Nb]),
],
offsets=[par.Nb,1,0,-1,-par.Nb],
shape=(par.Nab,par.Nab),format='csc')
@njit(parallel=True,fastmath=True)
def create_Q_UMFPACK(par,sol):
""" create Q matrix for use in UMFPACK """
# unpack
Qps = sol.Qps
Qis = sol.Qis
Qxs = sol.Qxs
Qps[:] = 0
Qis[:] = 0
Qxs[:] = 0
# loop in parallel
for iz in prange(par.Nz):
k = 0 # number of elements (so far)
for col in range(par.Nab):
# a upper
if col >= par.Nb:
x = sol.a_updiag[iz,col]
if not np.abs(x) < eps_low:
Qis[iz,k] = col - par.Nb # row
Qxs[iz,k] = x
k += 1
# b upper
if col >= 1:
x = sol.b_updiag[iz,col]
if not np.abs(x) < eps_low:
Qis[iz,k] = col - 1 # row
Qxs[iz,k] = x
k += 1
# center
x = sol.centdiag[iz,col]
if not np.abs(x) < eps_low:
Qis[iz,k] = col # row
Qxs[iz,k] = x
k += 1
# b lower
if col <= par.Nab-2:
x = sol.b_lowdiag[iz,col]
if not np.abs(x) < eps_low:
Qis[iz,k] = col + 1 # row
Qxs[iz,k] = x
k += 1
# a lower
if col <= par.Nab-par.Nb-1:
x = sol.a_lowdiag[iz,col]
if not np.abs(x) < eps_low:
Qis[iz,k] = col + par.Nb # row
Qxs[iz,k] = x
k += 1
# update total number of elements so far
Qps[iz,col+1] = k
def solve_eq_sys_HJB(par,sol,ast,solmethod,cppfile):
""" solve equation system for HJB """
if solmethod == 'scipy':
for iz,i0,i1 in [(iz,iz*par.Nab,(iz+1)*par.Nab) for iz in range(par.Nz)]:
sol.v.ravel()[i0:i1] = spsolve(ast.Qs[iz],ast.RHS_HJB[i0:i1],permc_spec='NATURAL')
elif solmethod == 'UMFPACK':
cppfile.solve_many(par.Nab,par.Nz,ast.pp_Qps,ast.pp_Qis,ast.pp_Qxs,
ast.pp_RHS_HJB,ast.pp_v,ast.pp_symbolics,ast.pp_Wi,ast.pp_W,True,True,True,par.cppthreads)
else:
        raise Exception('unknown solution method')
def howard_improvement_steps(par,sol,ast,solmethod,cppfile):
""" take howard improvement steps """
for _ in range(par.maxiter_HIS):
# a. create RHS
v_prev_HIS = sol.v.copy()
create_RHS_HJB(par,sol,ast,v_prev_HIS)
# b. solve
solve_eq_sys_HJB(par,sol,ast,solmethod,cppfile)
# c. distance
HIS_dist = np.max(np.abs(sol.v-v_prev_HIS))
if HIS_dist < par.HIStol:
break
def solve_HJB(model,do_print=True,print_freq=100,solmethod='UMFPACK'):
""" solve HJB equation """
t0 = time.time()
# unpack
par = model.par
sol = model.sol
ast = model.ast
cppfile = model.cppfile
# solve HJB
it = 1
while it < par.maxiter_HJB:
v_prev = sol.v.copy()
# i. derivatives
derivatives(par,sol)
# ii. upwind scheme
upwind(par,sol)
# iii. RHS
create_RHS_HJB(par,sol,ast,v_prev)
# iv. diagonals
create_diags_HJB(par,sol)
# v. construct Q
create_Q(par,sol,ast,solmethod)
# vi. solve equation system
solve_eq_sys_HJB(par,sol,ast,solmethod,cppfile)
        # vii. howard improvement step
if it > par.start_HIS and dist > par.stop_HIS_fac*par.HJBtol:
howard_improvement_steps(par,sol,ast,solmethod,cppfile)
# viii. check convergence
dist = np.max(np.abs(sol.v-v_prev))
if dist < par.HJBtol:
if do_print: print(f' converged in {elapsed(t0)} in iteration {it}')
break
else:
if do_print and (it < 10 or it%print_freq == 0):
print(f'{it:5d}: {dist:.16f}')
it += 1
# assert converged value function monotonicity (not always fulfilled with dense grids)
#assert np.any(np.diff(sol.v,axis = 1)<-1e-8) == 0 # monotonicity in a dimension
#assert np.any(np.diff(sol.v,axis = 2)<-1e-8) == 0 # monotonicity in b dimension
return time.time()-t0
################
# 4. solve KFE #
################
@njit(parallel=True,fastmath=True)
def create_diags_KFE(par,sol):
""" create diagonals for KFE """
# unpack
a_lowdiag = sol.a_lowdiag
a_updiag = sol.a_updiag
b_lowdiag = sol.b_lowdiag
b_updiag = sol.b_updiag
centdiag = sol.centdiag
for iz in prange(par.Nz):
for ia in range(par.Na):
for ib in range(par.Nb):
a = par.grid_a[ia]
adrift = (par.ra + par.eta)*a - par.ltau0*a**par.ltau + par.xi*par.w
a_low = -np.fmin(sol.d[iz,ia,ib] + adrift,0)/par.dab[ia]
a_up = np.fmax(sol.d[iz,ia,ib] + adrift,0)/par.daf[ia]
b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbb[ib]
b_up = np.fmax(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbf[ib]
# correct boundaries
if ib == par.Nb-1:
a_low = -np.fmin(sol.d[iz,ia,ib-1] + adrift,0)/par.dab[ia]
a_up = np.fmax(sol.d[iz,ia,ib-1] + adrift,0)/par.daf[ia]
b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib-1],0)/par.dbb[ib]
# update
i = ib*par.Na + ia
a_centdiag = a_low + a_up
b_centdiag = b_low + b_up
centdiag[iz,i] = 1 + par.DeltaKFE*(a_centdiag + b_centdiag + par.eta - par.switch_diag[iz])
a_updiag[iz,i] = -par.DeltaKFE*a_up*par.DAB_lowdiag1[i]
a_lowdiag[iz,i] = -par.DeltaKFE*a_low*par.DAB_updiag1[i]
b_updiag[iz,i] = -par.DeltaKFE*b_up*par.DAB_lowdiag2[i]
b_lowdiag[iz,i] = -par.DeltaKFE*b_low*par.DAB_updiag2[i]
return sol
def create_B(par,sol,ast,solmethod):
""" create B matrix """
# think of:
# Qps as Bps
# Qis as Bis
# Qxs as Bxs
# a. initialize
if solmethod == 'UMFPACK':
sol.Qps[:] = 0
sol.Qis[:] = 0
sol.Qxs[:] = 0
# b. construct sparse matrices
for iz in range(par.Nz):
ast.Qs[iz] = diags( diagonals=[
sol.b_lowdiag[iz,par.Na:],
sol.a_lowdiag[iz,1:],
sol.centdiag[iz,:],
sol.a_updiag[iz,:-1],
sol.b_updiag[iz,:-par.Na],
],
offsets=[par.Na,1,0,-1,-par.Na],
shape=(par.Nab,par.Nab),format='csc')
# pack information for UMFPACK
if solmethod == 'UMFPACK':
Qz = ast.Qs[iz]
N = Qz.data.size
sol.Qps[iz,:] = Qz.indptr
sol.Qis[iz,:N] = Qz.indices
sol.Qxs[iz,:N] = Qz.data
def solve_eq_sys_KFE(par,sol,ast,g_prev,solmethod,cppfile):
""" solve equation system for KFE """
# a. update g
sol.g[:] = (np.identity(par.Nz) + par.DeltaKFE*par.switch_off).T@g_prev
index = par.Na*par.Nb_neg
sol.g[:,index] = sol.g[:,index] + par.DeltaKFE*par.eta/par.dab_tilde[par.Nb_neg,0]*(par.dab_tilde.ravel()@g_prev.T)
# b. solve
if solmethod == 'scipy':
for iz in range(par.Nz):
sol.g[iz,:] = spsolve(ast.Qs[iz],sol.g[iz,:])
elif solmethod == 'UMFPACK':
ast.RHS_KFE[:] = sol.g.ravel() # copy to RHS
cppfile.solve_many(par.Nab,par.Nz,ast.pp_Qps,ast.pp_Qis,ast.pp_Qxs,
ast.pp_RHS_KFE,ast.pp_g,ast.pp_symbolics,ast.pp_Wi,ast.pp_W,True,True,True,par.cppthreads)
else:
        raise Exception('unknown solution method')
def solve_KFE(model,do_print=True,print_freq=100,solmethod='UMFPACK'):
""" solve Kolmogorov-Forward equation """
t0 = time.time()
# unpack
par = model.par
sol = model.sol
ast = model.ast
cppfile = model.cppfile
# a. diagonals
create_diags_KFE(par,sol)
# b. iterate
it = 1
while it < par.maxiter_KFE:
g_prev = sol.g.copy()
# i. construct B
create_B(par,sol,ast,solmethod)
# ii. solve equation
solve_eq_sys_KFE(par,sol,ast,g_prev,solmethod,cppfile)
# iii. check convergence
dist = np.max(np.abs(g_prev.ravel()-sol.g.ravel()))
if dist < par.KFEtol:
if do_print:
print(f' converged in {elapsed(t0)} secs in iteration {it}')
break
else:
if do_print and (it < 10 or it%print_freq == 0):
print(f'{it:5d}: {dist:.16f}')
it += 1
return time.time()-t0
##########
# 5. MPC #
##########
@njit(parallel=True,fastmath=True)
def create_diags_cumcon(par,sol):
""" create diagonals for cumulative consumption """
# unpack
a_lowdiag = sol.a_lowdiag
a_updiag = sol.a_updiag
b_lowdiag = sol.b_lowdiag
b_updiag = sol.b_updiag
centdiag = sol.centdiag
for iz in prange(par.Nz):
for ia in range(par.Na):
for ib in range(par.Nb):
a = par.grid_a[ia]
adrift = (par.ra + par.eta)*a - par.ltau0*a**par.ltau + par.xi*par.w
a_low = -np.fmin(sol.d[iz,ia,ib] + adrift,0)/par.dab[ia]
a_up = np.fmax(sol.d[iz,ia,ib] + adrift,0)/par.daf[ia]
b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbb[ib]
b_up = np.fmax(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib],0)/par.dbf[ib]
# correct boundaries
if ib == par.Nb-1:
a_low = -np.fmin(sol.d[iz,ia,ib-1] + adrift,0)/par.dab[ia]
a_up = np.fmax(sol.d[iz,ia,ib-1] + adrift,0)/par.daf[ia]
b_low = -np.fmin(sol.s[iz,ia,ib] - sol.d_adj[iz,ia,ib-1],0)/par.dbb[ib]
# update
i = ib*par.Na + ia
a_centdiag = a_low + a_up
b_centdiag = b_low + b_up
centdiag[iz,i] = 1 + par.DeltaCUMCON*(a_centdiag + b_centdiag - par.switch_diag[iz])
a_updiag[iz,i] = -par.DeltaCUMCON*a_up
a_lowdiag[iz,i] = -par.DeltaCUMCON*a_low
b_updiag[iz,i] = -par.DeltaCUMCON*b_up
b_lowdiag[iz,i] = -par.DeltaCUMCON*b_low
def cumulative_consumption(par,sol):
# a. create diags for sparse matrix
create_diags_cumcon(par,sol)
# b. define variables and containers for solution
nsteps = int(np.round(1/par.DeltaCUMCON)) # 1 quarter
cdvec = (np.reshape(np.array([sol.c,sol.d]),(2,par.Nz,par.Nab),order='F').swapaxes(0,1)).swapaxes(0,2)
cdcumvec = np.zeros((par.Nab,2,par.Nz))
# c. solve
for _ in range(nsteps):
cdcumvec += par.DeltaCUMCON*(cdvec + np.reshape(cdcumvec.reshape(2*par.Nab,par.Nz)@par.switch_off.T,(par.Nab,2,par.Nz)))
# sweep over z
for iz in range(par.Nz):
Bz = spdiags(data=[sol.centdiag[iz,:],
sol.a_updiag[iz,:],
sol.a_lowdiag[iz,:],
sol.b_updiag[iz,:],
sol.b_lowdiag[iz,:]],
diags=[0,-1,1,-par.Na,par.Na],
m=par.Nab, n=par.Nab,
format='csc')
cdcumvec[:,:,iz] = spsolve(Bz.T,cdcumvec[:,:,iz])
# d. calculate one quarter cumulative expected consumption
ccum1 = cdcumvec[:,0,:].reshape(par.Na,par.Nb,par.Nz,order='F')
    ccum1 = (ccum1.swapaxes(0,2)).swapaxes(1,2) # change ordering so this becomes unnecessary
return ccum1
def FeynmanKac_MPC(par,sol,moms):
# a. solve PDE
ccum = cumulative_consumption(par,sol)
# b. calculate MPC's
rebamount = (500/115_000)*(moms['Y']*4)
lmpreb = np.zeros((par.Nz,par.Na,par.Nb))
for ia in range(par.Na):
for iz in range(par.Nz):
f = interpolate.interp1d(par.grid_b,ccum[iz,ia,:],fill_value='extrapolate')
lmpreb[iz,ia,:] = f(par.grid_b+rebamount)
MPCs = (lmpreb-ccum)/rebamount
return MPCs
```
#### File: HANKPY/UMFPACK/cpptools.py
```python
import os
import shutil
import time
import ctypes as ct
import numpy as np
builderfolder_func = lambda filename: f'build_{filename}'
def build_cpp_project(filename,do_print=False,force=False,clean=True):
""" build cpp project using CMake """
buildfolder = builderfolder_func(filename)
# a. check if build exists
if os.path.isdir(buildfolder):
if not force:
if do_print:
print(f'{buildfolder} is already build')
return
else:
shutil.rmtree(buildfolder)
time.sleep(2)
# b. make build
os.mkdir(buildfolder)
os.mkdir(buildfolder + '/build/')
# c. write CMakeLists.txt
CMakeLists_txt = ''
CMakeLists_txt += 'PROJECT(project)\n'
CMakeLists_txt += 'cmake_minimum_required(VERSION 2.8)\n'
CMakeLists_txt += 'set(SuiteSparse_DIR "C:/suitesparse-metis-for-windows/build/SuiteSparse")\n'
CMakeLists_txt += 'find_package(SuiteSparse CONFIG REQUIRED)\n'
CMakeLists_txt += 'set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -openmp -Ox")\n'
CMakeLists_txt += f'add_library({filename} SHARED ../{filename}.cpp)\n'
CMakeLists_txt += 'include_directories("C:/suitesparse-metis-for-windows/build/install/include/suitesparse")\n'
CMakeLists_txt += f'target_link_libraries({filename} ${{SuiteSparse_LIBRARIES}})\n'
with open(f'{buildfolder}/CMakeLists.txt', 'w') as txtfile:
txtfile.write(CMakeLists_txt)
# d. call CMake
batfile_txt = f'"C:/Program Files/CMake/bin/cmake.exe" -S{buildfolder} -B{buildfolder}/build -G"Visual Studio 15 2017 Win64"'
with open('build.bat', 'w') as batfile:
batfile.write(batfile_txt)
os.system('build.bat')
# e. clean
if clean:
os.remove('build.bat')
def compile_cpp(filename,do_print=False,force=False,clean=True):
""" compile cpp files using Visual Studio"""
buildfolder = builderfolder_func(filename)
# a. check if build exists
if os.path.isdir(f'{buildfolder}/build/Release/'):
if not force:
if do_print:
print(f'{filename} already compiled')
return
# a. write compile.bat
batfile_txt = ''
batfile_txt += 'call "C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Auxiliary/Build/vcvarsall.bat" x64\n'
batfile_txt += f'call msbuild "{os.getcwd()}/{buildfolder}/build/project.sln" /p:Configuration=Release'
with open('compile.bat', 'w') as batfile:
batfile.write(batfile_txt)
# b. run compile.bat
os.system('compile.bat')
# c. clean
if clean:
os.remove('compile.bat')
def link(filename,do_print=False):
""" link to filename with hack for getting openmp to work """
buildfolder = builderfolder_func(filename)
# a. load
cppfile = ct.cdll.LoadLibrary(f'{buildfolder}/build/Release/{filename}.dll')
# b. setup openmp and delink
cppfile.setup_omp()
delink(cppfile,do_print=False)
# c. link again
cppfile = ct.cdll.LoadLibrary(f'{buildfolder}/build/Release/{filename}.dll')
if do_print:
print('cppfile linked succesfully')
return cppfile
def delink(cppfile,do_print=False):
""" delinking cppfile is necessary before recompiling
    (otherwise kernel must be re-started) """
# a. get handle
handle = cppfile._handle
# b. delete linking variable
del cppfile
# c. free handle
ct.windll.kernel32.FreeLibrary.argtypes = [ct.wintypes.HMODULE]
ct.windll.kernel32.FreeLibrary(handle)
if do_print:
        print('cppfile delinked successfully')
```
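A hedged, Windows-only sketch of the intended workflow (the file name 'solver' is hypothetical; the .cpp must export setup_omp() and the hard-coded CMake/Visual Studio/SuiteSparse paths above must exist):

```python
import cpptools

cpptools.build_cpp_project('solver', do_print=True)  # writes CMakeLists.txt and runs CMake
cpptools.compile_cpp('solver', do_print=True)        # builds build_solver/build/Release/solver.dll
cppfile = cpptools.link('solver', do_print=True)     # loads the dll via ctypes (calls setup_omp)
# ... call exported functions through ctypes, then ...
cpptools.delink(cppfile)                             # free the handle before recompiling
```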
|
{
"source": "JeppeDruedahl/HighFreqInc",
"score": 3
}
|
#### File: JeppeDruedahl/HighFreqInc/moments.py
```python
import numpy as np
from numba import njit
###########
# 1. base #
###########
@njit
def mean_var_skew_kurt(x):
""" calculate mean, variance, skewness and kurtosis """
# a. allocate memory and initialize
out = np.zeros(4)
mean = out[0:]
var = out[1:]
skew = out[2:]
kurt = out[3:]
Nactive = 0
# b. determine sum and active
for i in range(x.size):
if ~np.isnan(x[i]):
Nactive += 1
mean[0] += x[i]
# c. means
if Nactive == 0:
mean[0] = np.nan
else:
mean[0] /= Nactive
# d. variance, skewness and kurtosis
for i in range(x.size):
if Nactive == 0: continue
if ~np.isnan(x[i]):
diff = x[i]-mean[0]
diff2 = diff*diff
var[0] += diff2
skew[0] += diff2*diff
kurt[0] += diff2*diff2
# e. results
if Nactive > 0:
var[0] /= Nactive-1
else:
var[0] = np.nan
if Nactive > 2:
cor_fac = Nactive/((Nactive-1)*(Nactive-2))
skew[0] *= cor_fac
skew[0] /= var[0]**(3/2)
else:
skew[0] = np.nan
if Nactive > 3:
cor_fac = (((Nactive-1)/Nactive)*((Nactive-2)/(Nactive+1))*(Nactive-3))
cor_sub = 3*(Nactive-1)*(Nactive-1) / ((Nactive-2)*(Nactive-3))
kurt[0] /= cor_fac
kurt[0] /= var[0]*var[0]
kurt[0] -= cor_sub
else:
kurt[0] = np.nan
return out
@njit
def cov(a,b):
""" calculate covariance """
# a. initialize
mean_a = 0.0
mean_b = 0.0
Nactive = 0
# b. determine sums and active
for i in range(a.size):
if ~np.isnan(a[i]) and ~np.isnan(b[i]):
Nactive += 1
mean_a += a[i]
mean_b += b[i]
# c. means
if Nactive == 0:
return np.nan
mean_a /= Nactive
mean_b /= Nactive
# d. covariance
cov = 0.0
for i in range(a.size):
if ~np.isnan(a[i]) and ~np.isnan(b[i]):
cov += (a[i]-mean_a)*(b[i]-mean_b)
# e. result
cov /= (Nactive-1)
return cov
@njit
def share_in_range(x,omegas_low,omegas_high):
""" calculate share in range """
    # a. allocate memory and initialize
Nomegas = omegas_low.size
Ntrue = np.zeros(Nomegas)
Nactive = 0
# b. compute
for i in range(x.size):
if ~np.isnan(x[i]):
Nactive += 1
for h in range(Nomegas):
if (x[i] >= omegas_low[h]) and (x[i] <= omegas_high[h]):
Ntrue[h] += 1
else:
break # assuming ordered from high to low
# c. result
out = np.zeros(Nomegas)
if Nactive > 0:
for h in range(Nomegas):
out[h] = Ntrue[h]/Nactive
else:
for h in range(Nomegas):
out[h] = np.nan
return out
@njit
def share_in_range_cond(x,y,omegas_low,omegas_high,cond_low,cond_high):
""" calculate conditional share in range """
    # a. allocate memory and initialize
Nomegas = omegas_low.size
Ntrue = np.zeros(Nomegas)
Ntrue_l = np.zeros(Nomegas)
Ntrue_u = np.zeros(Nomegas)
Nactive = 0
Nactive_l = 0
Nactive_u = 0
# b. compute
for i in range(x.size):
if ~np.isnan(x[i]) and (y[i] >= cond_low) and (y[i] <= cond_high):
Nactive += 1
for h in range(Nomegas):
if (x[i] >= omegas_low[h]) and (x[i] <= omegas_high[h]):
Ntrue[h] += 1
else:
break # assuming ordered from high to low
if ~np.isnan(x[i]) and y[i] < cond_low:
Nactive_l += 1
for h in range(Nomegas):
if (x[i] >= omegas_low[h]) and (x[i] <= omegas_high[h]):
Ntrue_l[h] += 1
else:
break # assuming ordered from high to low
if ~np.isnan(x[i]) and y[i] > cond_high:
Nactive_u += 1
for h in range(Nomegas):
if (x[i] >= omegas_low[h]) and (x[i] <= omegas_high[h]):
Ntrue_u[h] += 1
else:
break # assuming ordered from high to low
# c. result
out = np.zeros((3,Nomegas))
if Nactive > 0:
for h in range(Nomegas):
out[0,h] = Ntrue[h]/Nactive
out[1,h] = Ntrue_l[h]/Nactive_l
out[2,h] = Ntrue_u[h]/Nactive_u
else:
for h in range(Nomegas):
out[0,h] = np.nan
out[1,h] = np.nan
out[2,h] = np.nan
return out
######################
# 2. autocovariances #
######################
def auto_cov_d1y1l(par,sim,l):
assert l > 0
return cov(sim.d1ky[0,:,l:].ravel(),sim.d1ky[0,:,:-l].ravel())
def auto_cov_d12y12l(par,sim,l):
assert l > 0
return cov(sim.d12ky[0,:,12*l:].ravel(),sim.d12ky[0,:,:-12*l].ravel())
def frac_auto_cov_d12y1l(par,sim,l):
assert l > 0 and l < 12
return cov(sim.d12ky[0,:,l:].ravel(),sim.d12ky[0,:,:-l].ravel())
# windsorized
def auto_cov_d1yw1l(par,sim,l):
assert l > 0
return cov(sim.d1kyw[0,:,l:].ravel(),sim.d1kyw[0,:,:-l].ravel())
def auto_cov_d12yw12l(par,sim,l):
assert l > 0
return cov(sim.d12kyw[0,:,12*l:].ravel(),sim.d12kyw[0,:,:-12*l].ravel())
def frac_auto_cov_d12yw1l(par,sim,l):
assert l > 0 and l < 12
return cov(sim.d12kyw[0,:,l:].ravel(),sim.d12kyw[0,:,:-l].ravel())
###########
# 4. fast #
###########
def get_omegas(model,momname,k):
""" unpack omegas from moment specification """
if momname in model.specs:
return np.array([info['args'][1] for info in model.specs[momname] if info['args'][0] == k])
else:
return np.array([])
def fast(model,momname,args):
""" function for calculating moments fast """
# a. mean_var_skew_kurt
_d12ky_list = ['mean_d12ky','var_d12ky','skew_d12ky','kurt_d12ky']
_d12kyw_list = ['mean_d12kyw','var_d12kyw','skew_d12kyw','kurt_d12kyw']
if (momname in _d12ky_list) or (momname in _d12kyw_list):
# i. compute
k = args
if momname in _d12ky_list:
x = model.sim.d12ky[k-1].ravel()
else:
x = model.sim.d12kyw[k-1].ravel()
out = mean_var_skew_kurt(x)
# ii. unpack
if momname in _d12ky_list:
_d12ky_list_now = _d12ky_list
else:
_d12ky_list_now = _d12kyw_list
for i,momname_ in enumerate(_d12ky_list_now):
if momname_ in model.specs:
model.moms[(momname_,args)] = out[i]
return True
_d1ky_list = ['mean_d1ky','var_d1ky','skew_d1ky','kurt_d1ky']
_d1kyw_list = ['mean_d1kyw','var_d1kyw','skew_d1kyw','kurt_d1kyw']
if (momname in _d1ky_list) or (momname in _d1kyw_list):
# i. compute
k = args
        if momname in _d1ky_list:
x = model.sim.d1ky[k-1].ravel()
else:
x = model.sim.d1kyw[k-1].ravel()
out = mean_var_skew_kurt(x)
# ii. unpack
if momname in _d1ky_list:
_d1ky_list_now = _d1ky_list
else:
_d1ky_list_now = _d1kyw_list
for i,momname_ in enumerate(_d1ky_list_now):
if momname_ in model.specs:
model.moms[(momname_,args)] = out[i]
return True
# b. leq
if momname in ['leq_d12ky','cdf_d12ky']:
# i. k and omegas
k = args[0]
if momname == 'leq_d12ky':
omegas_high = get_omegas(model,'leq_d12ky',k)
else:
omegas_high = model.par.omegas_cdf
omegas_low = -np.inf*np.ones(omegas_high.size)
# ii. compute
x = model.sim.d12ky[k-1].ravel()
out = share_in_range(x,omegas_low,omegas_high)
# iii. unpack
for i,omega in enumerate(omegas_high):
if momname == 'leq_d12ky':
model.moms[(momname,(k,omega))] = out[i]
else:
model.moms[(momname,(k,i))] = out[i]
return True
if momname in ['leq_d1ky','cdf_d1ky']:
# i. k and omegas
k = args[0]
if momname == 'leq_d1ky':
omegas_high = get_omegas(model,'leq_d1ky',k)
else:
omegas_high = model.par.omegas_cdf
omegas_low = -np.inf*np.ones(omegas_high.size)
# ii. compute
x = model.sim.d1ky[k-1].ravel()
out = share_in_range(x,omegas_low,omegas_high)
# iii. unpack
for i,omega in enumerate(omegas_high):
if momname == 'leq_d1ky':
model.moms[(momname,(k,omega))] = out[i]
else:
model.moms[(momname,(k,i))] = out[i]
return True
# c. leq_d*ky_midrange
if momname in ['leq_d12ky_midrange','cdf_d12ky_midrange']:
# i. k and omegas
k = args[0]
if momname == 'leq_d12ky_midrange':
omegas_high = get_omegas(model,'leq_d12ky_midrange',k)
else:
omegas_high = model.par.omegas_cdf
omegas_low = -np.inf*np.ones(omegas_high.size)
# ii. compute
x = model.sim.d12ky[k-1].ravel()
y = model.sim.d12ky_lag[k-1].ravel()
cond_low = -model.par.omega_cond_midrange
cond_high = model.par.omega_cond_midrange
out = share_in_range_cond(x,y,omegas_low,omegas_high,cond_low,cond_high)
# iii. unpack
for i,omega in enumerate(omegas_high):
if momname == 'leq_d12ky_midrange':
model.moms[('leq_d12ky_midrange',(k,omega))] = out[0,i]
if ('leq_d12ky_midrange_l',(k,omega)) in model.specs: model.moms[('leq_d12ky_midrange_l',(k,omega))] = out[1,i]
if ('leq_d12ky_midrange_u',(k,omega)) in model.specs: model.moms[('leq_d12ky_midrange_u',(k,omega))] = out[2,i]
else:
model.moms[(momname,(k,i))] = out[0,i]
return True
if momname in ['leq_d1ky_midrange','cdf_d1ky_midrange']:
# i. k and omegas
k = args[0]
if momname == 'leq_d1ky_midrange':
omegas_high = get_omegas(model,'leq_d1ky_midrange',k)
else:
omegas_high = model.par.omegas_cdf
omegas_low = -np.inf*np.ones(omegas_high.size)
# ii. compute
x = model.sim.d1ky[k-1].ravel()
y = model.sim.d1ky_lag[k-1].ravel()
cond_low = -model.par.omega_cond_midrange
cond_high = model.par.omega_cond_midrange
out = share_in_range_cond(x,y,omegas_low,omegas_high,cond_low,cond_high)
# iii. unpack
for i,omega in enumerate(omegas_high):
if momname == 'leq_d1ky_midrange':
model.moms[('leq_d1ky_midrange',(k,omega))] = out[0,i]
if ('leq_d1ky_midrange_l',(k,omega)) in model.specs: model.moms[('leq_d1ky_midrange_l',(k,omega))] = out[1,i]
                if ('leq_d1ky_midrange_u',(k,omega)) in model.specs: model.moms[('leq_d1ky_midrange_u',(k,omega))] = out[2,i]
else:
model.moms[(momname,(k,i))] = out[0,i]
return True
if momname == 'var_y_d12_diff':
var_y = np.var(model.sim.y,axis=0)
for k in range(1,model.par.kmax):
model.moms[('var_y_d12_diff',k)] = np.mean(var_y[12*k:]-var_y[:-12*k])
return True
if momname == 'cov_y_y_d12_diff':
cov_y_d12_diff_ = np.nan*np.ones((model.par.kmax-2,model.par.simT))
for t in range(model.par.simT-12):
cov_short = cov(model.sim.y[:,t],model.sim.y[:,t+12])
for k in range(1,model.par.kmax-1):
if t+12+12*k < model.par.simT:
cov_long = cov(model.sim.y[:,t],model.sim.y[:,t+12+12*k])
cov_y_d12_diff_[k-1,t] = cov_long-cov_short
for k in range(1,model.par.kmax-1):
model.moms[('cov_y_y_d12_diff',k)] = np.nanmean(cov_y_d12_diff_[k-1])
return True
# did not find anything
return False
```
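A short sketch of the NaN-aware helpers (assuming numpy and numba are installed; the @njit functions compile on first call):

```python
import numpy as np

x = np.array([1.0, 2.0, np.nan, 4.0, 0.0])
mean, var, skew, kurt = mean_var_skew_kurt(x)
print(mean)            # 1.75 -- the NaN observation is skipped

a = np.array([1.0, 2.0, np.nan, 4.0])
b = np.array([2.0, 4.0, 6.0, 8.0])
print(cov(a, b))       # ~4.67 -- only complete (a, b) pairs are used
```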
|
{
"source": "JeppeDruedahl/IOGE",
"score": 3
}
|
#### File: JeppeDruedahl/IOGE/HousingModel.py
```python
import time
import numpy as np
import pandas as pd
# consav package
from consav import ModelClass
from consav.misc import elapsed, nonlinspace, markov_rouwenhorst
# local
import post_decision
import negm
import nvfi
import simulate
def solve_model(model,t_min=0,do_print=True):
""" solve the model """
par = model.par
sol = model.sol
t0_outer = time.time()
# a. re-set up grids
t0 = time.time()
model.create_grids()
if do_print: print(f'setup grids in {elapsed(t0)}')
# c. time loop
for t in reversed(range(t_min,par.T)):
t0 = time.time()
# i. post-decions
t0_pd = time.time()
post_decision.compute_wq_renters(par,sol,t)
post_decision.compute_wq_owners(par,sol,t)
t_pd = elapsed(t0_pd)
# ii. negm
t0_negm = time.time()
negm.solve_renters(par,sol,t)
negm.solve_owners(par,sol,t)
t_negm = elapsed(t0_negm)
# iii. evaluate values of each discrete choice
t0_evaluate = time.time()
nvfi.evaluate_rt(par,sol,t)
nvfi.evaluate_ft(par,sol,t)
nvfi.evaluate_bt(par,sol,t)
t_evaluate = elapsed(t0_evaluate)
# iv. final nvfi
t0_nvfi = time.time()
nvfi.solve_renters(par,sol,t)
t_nvfi_r = elapsed(t0_nvfi)
t0_nvfi = time.time()
nvfi.solve_owners(par,sol,t)
t_nvfi_o = elapsed(t0_nvfi)
if do_print:
msg = f't = {t:2d} solved in {elapsed(t0)}'
msg += f'[pd: {t_pd}, negm: {t_negm}, evaluate: {t_evaluate}, nvfi_r: {t_nvfi_r}, nvfi_o: {t_nvfi_o}]'
print(msg)
if do_print: print(f'model solved in {elapsed(t0_outer)}')
def simulate_model(model,do_print=True,seed=1986):
""" simulate the model """
if not seed is None: np.random.seed(seed)
par = model.par
sol = model.sol
sim = model.sim
t0_outer = time.time()
# a. draw random numbers
sim.i_beta[:] = np.random.choice(par.Nbeta,size=par.simN) # preferences
sim.a0[:] = np.random.gamma(par.a0_shape,par.a0_scale,size=par.simN) # initial assets
sim.pi_p[:] = np.random.uniform(size=(par.simN,par.T)) # income process
sim.pi_c[:] = np.random.uniform(size=(par.simN,par.T)) # discrete choice
# b. simulate
simulate.simulate(par,sol,sim)
if do_print: print(f'model simulated in {elapsed(t0_outer)}')
# class
class HousingModelClass(ModelClass):
def setup(self):
""" set baseline parameters in .par """
par = self.par
# specify list over parameters, which are allowed not to be floats
self.not_float_list = ['T','TR','age_min','t_min','Delta_iota',
'Nbeta','Na','Niota','Nh','Nht','Np','NLTV','Nm','Nx','Nz',
'Nrt','Nbt','Nft','Nkt','Ncr','Nco','do_sim','simN']
# a. economic parameters
# life-cycle
par.T = 55 # life-span from age_min
par.TR = 37 # working-life-span from age_min
par.age_min = 25 # only used in figures
par.t_min = 0 # used when solving
# income
par.rho_p = 0.99 # persistence of income shocks
par.sigma_p = 0.30 # std. of income shocks
par.G = np.ones(par.T) # age-specific growth factors of income
par.G[:20] = 1.066
par.G[20:par.TR] = 1.015
par.G[par.TR:] = 0.96
par.retirement_drop = 1.00 # drop in income at retirement
# assets and housing
par.ra = 0.035 # return on liquid assets
par.rm = 0.040 # mortgage interest rate
par.rb = 0.070 # bank loan interest rate
par.ph = 1.000 # housing price
par.rh = 0.045 # rental price
par.delta = 0.0075 # mortgage interest only spread
par.gamma_m = 0.050 # mortgage repayment rate
par.gamma_b = 0.100 # bank loan repayment rate
par.tau_f = 0.100 # loan refinancing cost
par.tau_h = 0.200 # moving-in cost for owners
par.tau_ht = 0.010 # moving-in cost for renters
par.kappa_p = 4.00 # loan-to-income ratio
par.kappa_h = 0.95 # loan-to-value ratio
par.kappa_h_mortgage = 0.80 # loan-to-value ratio (mortgage)
par.grid_h = np.array([2.0,4.0,6.0,8.0,10.0,15.0,20.0,25.0,30.0,35.0],dtype=np.float_) # housing choices
par.grid_ht = par.grid_h.copy()
par.Niota = 2 # maximum interest only period
par.Delta_iota = 0 # = 0 permanent interest only possible, else = 1
# preferences
par.beta_mean = 0.96
par.beta_low = 0.85
par.beta_high = 0.99
par.rho = 2.0 # CRRA parameter
par.nu = 20.0 # bequest utility multiplicative scaling
par.zeta = 8.0 # bequest utility additive scaling
par.alpha = 0.70 # non-durable weight
par.omega = 1.20 # homeowner bonus
par.sigma = 0.025 # smoothing
par.sigma_agg = 0.050 # smoothing
# b. computational parameters
par.Nbeta = 3 # grid for beta
par.Np = 7 # grid for p
par.NLTV = 20 # grid for LTV
par.LTV_phi = 1.0 # 1 -> equally spaced, > 1 more points closer to kappa_p
par.Na = 100 # grid for a
par.a_min = 0.0
par.a_max = 50.0
par.a_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
par.Nx = 200 # grid for x
par.x_min = 0.0
par.x_max = 80.0
par.x_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
par.Nz = 200 # grid for z
par.z_min = 0.0
par.z_max = 50.0
par.z_phi = 1.25 # 1 -> equally spaced, > 1 more points closer to min
# c. simulation parameters
par.do_sim = True
par.a0_shape = 0.1
par.a0_scale = 5.0
par.simN = 100_000
def create_grids(self):
""" create grids """
par = self.par
assert par.Delta_iota in [0,1]
# a. states
if par.Nbeta == 1:
par.grid_beta = np.array([par.beta_mean])
else:
par.grid_beta = np.array([par.beta_low,par.beta_mean,par.beta_high])
assert par.Nbeta == par.grid_beta.size
par.grid_LTV = np.flip(par.kappa_h-nonlinspace(0.0,par.kappa_h,par.NLTV,par.LTV_phi))
par.grid_a = nonlinspace(par.a_min,par.a_max,par.Na,par.a_phi)
par.grid_z = nonlinspace(0,par.z_max,par.Nz,par.z_phi)
par.grid_x = nonlinspace(0,par.x_max,par.Nx,par.x_phi)
# inferred size of housing grids
par.Nh = par.grid_h.size # owners
par.Nht = par.grid_ht.size # renters
        # inferred number of discrete choices
par.Nrt = par.Nht # number of choices for renters
par.Nkt = 1 # number of choices for keepers
par.Nft = par.Niota*par.NLTV # number of choices for refinancers
par.Nbt = par.Niota*par.Nh*par.NLTV # number of choices for buyers
par.Ncr = par.Nht + par.Nbt # number of choices for lagged renters
par.Nco = par.Nht + par.Nbt + par.Nft + par.Nkt # number of choices for lagged owners
# renters
par.r_i_ht = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_iota = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_i_h = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_i_LTV = -1*np.ones(par.Ncr,dtype=np.int_)
par.r_d = -1*np.ones(par.Ncr,dtype=np.int_)
# owners
par.o_i_ht = -1*np.ones(par.Nco,dtype=np.int_)
par.o_iota = -1*np.ones(par.Nco,dtype=np.int_)
par.o_i_h = -1*np.ones(par.Nco,dtype=np.int_)
par.o_i_LTV = -1*np.ones(par.Nco,dtype=np.int_)
par.o_d = -1*np.ones(par.Nco,dtype=np.int_)
# rt
i = 0
for i_ht in range(par.Nht):
par.r_i_ht[i] = i_ht
par.r_d[i] = 0
par.o_i_ht[i] = i_ht
par.o_d[i] = 0
i += 1
# bt
for iota in range(par.Niota):
for i_h in range(par.Nh):
for i_LTV in range(par.NLTV):
par.r_iota[i] = iota
par.r_i_h[i] = i_h
par.r_i_LTV[i] = i_LTV
par.r_d[i] = 1
par.o_iota[i] = iota
par.o_i_h[i] = i_h
par.o_i_LTV[i] = i_LTV
par.o_d[i] = 1
i += 1
# ft
for iota in range(par.Niota):
for i_LTV in range(par.NLTV):
par.o_iota[i] = iota
par.o_i_LTV[i] = i_LTV
par.o_d[i] = 2
i += 1
# kt
par.o_d[i] = 3
# b. income
out_ = markov_rouwenhorst(par.rho_p,par.sigma_p,par.Np)
par.grid_p, par.trans_p, par.ergodic_p, par.trans_cs_p, par.ergodic_cs_p = out_
par.Gamma = np.empty(par.T)
for t in range(par.T):
if t == 0: par.Gamma[t] = 1
else: par.Gamma[t] = par.G[t]*par.Gamma[t-1]
if t == par.TR: par.Gamma[t] *= par.retirement_drop
def allocate(self):
""" create grids and allocate memory for .par, .sol and .sim """
par = self.par
sol = self.sol
sim = self.sim
# a. parameters
self.create_grids()
# b. solution
# post-decison
post_r_shape = (par.T,par.Nbeta,par.Nht,par.Np,par.Na)
sol.r_q = np.nan*np.ones(post_r_shape)
sol.r_w = np.nan*np.ones(post_r_shape)
sol.r_inv_w = np.nan*np.ones(post_r_shape)
post_o_shape = (par.T,par.Nbeta,par.Niota,par.Nh,par.Np,par.NLTV,par.Na)
sol.o_q = np.nan*np.ones(post_o_shape)
sol.o_w = np.nan*np.ones(post_o_shape)
sol.o_inv_w = np.nan*np.ones(post_o_shape)
# consumption
negm_r_shape = (par.T,par.Nbeta,par.Nht,par.Np,par.Nz)
sol.r_inv_vbar = np.nan*np.ones(negm_r_shape)
sol.r_inv_mubar = np.nan*np.ones(negm_r_shape)
sol.r_cbar = np.nan*np.ones(negm_r_shape)
negm_o_shape = (par.T,par.Nbeta,par.Niota,par.Nh,par.Np,par.NLTV,par.Nz)
sol.o_inv_vbar = np.nan*np.ones(negm_o_shape)
sol.o_inv_mubar = np.nan*np.ones(negm_o_shape)
sol.o_cbar = np.nan*np.ones(negm_o_shape)
# intermediary
rt_shape = (par.T,par.Nbeta,par.Nht+1,par.Np,par.Nx,par.Nht)
sol.rt_inv_v = np.nan*np.ones(rt_shape)
sol.rt_inv_mu = np.nan*np.ones(rt_shape)
bt_shape = (par.T,par.Nbeta,par.Np,par.Nx,par.Niota,par.Nh,par.NLTV)
sol.bt_inv_v = np.nan*np.ones(bt_shape)
sol.bt_inv_mu = np.nan*np.ones(bt_shape)
ft_shape = (par.T,par.Nbeta,par.Nh,par.Np,par.Nx,par.Niota,par.NLTV)
sol.ft_inv_v = np.nan*np.ones(ft_shape)
sol.ft_inv_mu = np.nan*np.ones(ft_shape)
# final
final_r_shape = (par.T,par.Nbeta,par.Nht,par.Np,par.Na)
sol.r_v = np.nan*np.ones(final_r_shape)
sol.r_mu = np.nan*np.ones(final_r_shape)
sol.r_d = np.nan*np.ones(final_r_shape,dtype=np.int_)
final_o_shape = (par.T,par.Nbeta,par.Niota,par.Nh,par.Np,par.NLTV,par.Na)
sol.o_v = np.nan*np.ones(final_o_shape)
sol.o_mu = np.nan*np.ones(final_o_shape)
sol.o_d = np.nan*np.ones(final_o_shape,dtype=np.int_)
# c. simulation
sim_shape = (par.simN,par.T)
sim.d = -1*np.ones(sim_shape,dtype=np.int_)
sim.iota = -1*np.ones(sim_shape,dtype=np.int_)
sim.i_h = -1*np.ones(sim_shape,dtype=np.int_)
sim.i_ht = -1*np.ones(sim_shape,dtype=np.int_)
sim.i_p = -1*np.ones(sim_shape,dtype=np.int_)
sim.LTV = np.nan*np.ones(sim_shape)
sim.a = np.nan*np.ones(sim_shape)
sim.c = np.nan*np.ones(sim_shape)
sim.Nc = np.nan*np.ones(sim_shape)
sim.x = np.nan*np.ones(sim_shape)
sim.i_beta = -1*np.ones(par.simN,dtype=np.int_)
sim.a0 = np.nan*np.ones(par.simN)
sim.pi_p = np.nan*np.ones(sim_shape)
sim.pi_c = np.nan*np.ones(sim_shape)
def load_data(self):
self.owner = dict()
self.renter = dict()
self.full = dict()
stats = ['count', 'mean', 'p10', 'p25', 'p50', 'p75', 'p90']
for stat in stats:
self.owner[stat] = pd.read_csv(f'moments/{stat}_by_age_owner.txt', index_col='fam_age')
self.renter[stat] = pd.read_csv(f'moments/{stat}_by_age_renter.txt', index_col='fam_age')
self.full[stat] = pd.read_csv(f'moments/{stat}_by_age_all.txt', index_col='fam_age')
solve = solve_model
simulate = simulate_model
def test(self):
""" used for testing parallel possibilities """
solve_model(self,t_min=0,do_print=True)
self.simulate()
```
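The `create_grids` code above flattens every discrete choice (stay renting, buy, refinance, keep) into one index per lagged state. Below is a minimal, self-contained sketch of that enumeration pattern for lagged renters, with made-up grid sizes rather than the model's own:

```python
# Sketch only: enumerate lagged-renter choices into flat index arrays, as in create_grids.
import numpy as np

Nht, Niota, Nh, NLTV = 3, 2, 4, 5        # hypothetical grid sizes
Ncr = Nht + Niota*Nh*NLTV                # rent choices + buy choices

r_d = -1*np.ones(Ncr, dtype=np.int_)     # discrete decision code per flat index
i = 0
for i_ht in range(Nht):                  # rt: keep renting at rental size i_ht
    r_d[i] = 0
    i += 1
for iota in range(Niota):                # bt: buy (mortgage type, house size, LTV)
    for i_h in range(Nh):
        for i_LTV in range(NLTV):
            r_d[i] = 1
            i += 1
assert i == Ncr                          # every flat index is assigned exactly once
```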
|
{
"source": "JeppeDruedahl/MPCF",
"score": 2
}
|
#### File: JeppeDruedahl/MPCF/figs.py
```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
plt.style.use("seaborn-whitegrid")
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
def _cfunc(model):
# a. unpack
par = model.par
sol = model.sol
# b. figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# c. plot consumption
for idelta in range(0,par.Ndelta,10):
ax.plot(par.grid_m,sol.c[0,idelta,:],ls='-',label=f'$\delta_t = {par.grid_delta[idelta]:.2f}$')
# d. details
ax.set_xlabel('cash-on-hand, $m_t$')
ax.set_ylabel('consumption, $c_t$')
if par.Ndelta > 1:
ax.legend(frameon=True)
return fig,ax
def cfunc(model,m_max=12,c_max=1.5,postfix='',savefig=False):
fig,ax = _cfunc(model)
ax.set_xlim([0,m_max])
ax.set_ylim([0,c_max])
ax.set_xticks(np.arange(0,m_max+1,1))
fig.tight_layout()
if savefig: fig.savefig(f'figs/cfunc{postfix}.pdf')
def _MPC(model):
# a. unpack
par = model.par
sol = model.sol
# b. figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# c. plot MPC
MPC = np.diff(sol.c[0,0,:])/np.diff(par.grid_m)
ax.plot(par.grid_m[:-1],MPC,ls='-')
# d. details
ax.set_xlabel('cash-on-hand, $m_t$')
ax.set_ylabel('MPC')
ax.axhline(par.MPC_PF,ls='--',lw=1,color=colors[0])
ax.text(0.25,1.1*par.MPC_PF,'PIH')
return fig,ax
def MPC(model,m_max=12,postfix='',savefig=False):
# a. zoom
fig,ax = _MPC(model)
ax.set_yscale('log')
ax.set_xlim([model.par.grid_m[0],m_max])
ax.set_ylim([0.001,1.1])
ax.set_xticks(np.arange(0,m_max+1,1))
fig.tight_layout()
if savefig: fig.savefig(f'figs/MPC{postfix}.pdf')
# b. convergence
fig,ax = _MPC(model)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim([0.1,model.par.grid_m[-1]])
ax.set_ylim([0.001,1.1])
fig.tight_layout()
if savefig: fig.savefig(f'figs/MPC_convergence{postfix}.pdf')
def _MPCF(model,taus=[0,1,4,12],show_analytical=False):
# a. unpack
par = model.par
sol = model.sol
# b. figure
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# c. plot MPCF
for tau in taus:
MPCF = (sol.c[tau,5,:]-sol.c[tau,0,:])/(par.grid_delta[5]-par.grid_delta[0])
ax.plot(par.grid_m,MPCF,label=f'$\\tau_t = {tau}$')
# d. details
ax.set_xlabel('cash-on-hand, $m_t$')
ax.set_ylabel('MPCF')
if show_analytical:
for i,tau in enumerate(taus):
ax.axhline(par.MPCF_PF[tau],ls='--',lw=1,color=colors[i],label='')
ax.text(0.25,par.MPCF_PF[taus[-1]]-0.15,'PIH$\Rightarrow$',rotation=90)
ax.legend(frameon=True)
return fig,ax
def MPCF(model,m_max=12,taus=[1,4,6,8],show_analytical=True,postfix='',savefig=False):
# a. zoom
fig,ax = _MPCF(model,taus=taus,show_analytical=show_analytical)
ax.set_xlim([0,m_max])
ax.set_xticks(np.arange(0,m_max+1,1))
ax.set_ylim([0,1.3])
fig.tight_layout()
if savefig: fig.savefig(f'figs/MPCF{postfix}.pdf')
# b. convergence
fig,ax = _MPCF(model,taus=taus,show_analytical=True)
ax.set_xscale('log')
ax.set_xlim([0.1,model.par.grid_m[-1]])
ax.set_ylim([0,1.3])
fig.tight_layout()
if savefig: fig.savefig(f'figs/MPCF_convergence{postfix}.pdf')
def simulate(model,savefig=False,postfix=''):
# a. settings
m0s = np.array([0.75,1.5,2,4,12])
tau0 = 6
delta0 = 0.05
simN = 100_000
simT = 31
# b. simulate
m_before,c_before,C_before = model.simulate(simN,simT,m0s,tau0,0)
m_after,c_after,C_after = model.simulate(simN,simT,m0s,tau0,delta0)
# c. figure - response
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
time = np.arange(-tau0,-tau0+simT,1)
xticks = np.arange(-tau0,-tau0+simT,2)
for j,m0 in enumerate(m0s):
ax.plot(time,np.mean(C_after[j,:,:]-C_before[j,:,:],axis=1)/delta0,
ls='-',marker='o',markersize=4,
label=f'$M_0 = {m0:.2f}$')
ax.axvline(0,ls='--',lw=1,color='black')
ax.text(0.25,0.1,'$\Leftarrow$cash-flow arrives')
ax.set_xlabel('time relative to arrival of cash-flow')
ax.set_ylabel('dynamic MPCF')
ax.set_xticks(xticks)
ax.legend(frameon=True)
fig.tight_layout()
if savefig: fig.savefig(f'figs/simulation{postfix}.pdf')
# d. figure - still constrained
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# before
j = 0
ax.plot(time,np.mean(c_before[j,:,:] >= 0.99*m_before[j,:,:],axis=1),
ls='-',marker='o',markersize=4,color=colors[0],
label=f'$\Delta = 0.00$')
# after
ax.plot(time,np.mean(c_after[j,:,:] >= 0.99*m_after[j,:,:],axis=1),
ls='-',marker='o',markersize=4,color=colors[1],
label=f'$\Delta = {delta0:.2f}$')
ax.axvline(0,ls='--',lw=1,color='black')
ax.set_xlabel('time relative to arrival of cash-flow')
ax.set_ylabel('constrained, share')
ax.set_xticks(xticks)
ax.legend(frameon=True)
fig.tight_layout()
if savefig: fig.savefig(f'figs/simulation{postfix}_constrained.pdf')
```
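`_MPC` above recovers the marginal propensity to consume as a finite difference of the consumption function along the cash-on-hand grid. A tiny standalone illustration of the same computation, on a made-up consumption function rather than model output:

```python
# Sketch only: finite-difference MPC on a toy consumption function.
import numpy as np

grid_m = np.linspace(0.1, 12.0, 200)                # hypothetical cash-on-hand grid
c = np.minimum(grid_m, 1.0 + 0.05*(grid_m - 1.0))   # toy policy: constrained below m = 1

MPC = np.diff(c)/np.diff(grid_m)                    # same construction as in _MPC
print(MPC[0], MPC[-1])                              # ~1 when constrained, ~0.05 otherwise
```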
#### File: JeppeDruedahl/MPCF/MPCFModel.py
```python
import time
import numpy as np
# consav package
from consav import ModelClass,jit # baseline model class and jit
from consav.grids import nonlinspace # grids
from consav.quadrature import create_PT_shocks # income shocks
# local modules
import utility
import post_decision
import egm
import simulate
############
# 2. model #
############
class MPCFClass(ModelClass):
#########
# setup #
#########
def settings(self):
# a. namespaces
self.namespaces = []
# b. other attributes
self.other_attrs = []
# c. savefolder
self.savefolder = 'saved'
# d. list not-floats for safe type inference
self.not_floats = ['Npsi','Nxi','Ndelta','Nm','Na','Ntau','Nshocks','max_iter','do_print']
def setup(self):
""" set baseline parameters """
par = self.par
# a. preferences
par.beta = 0.96**(1/12)
par.rho = 2.0
# b. returns and income
par.R = 1.04**(1/12)
par.G = 1.03**(1/12)
par.sigma_psi = 0.02122033
par.Npsi = 6
par.sigma_xi = 0.30467480
par.Nxi = 6
par.pi = 0.0
par.mu = np.nan
# c. extra income
par.zeta = 1.0
par.Ntau = 12
# d. grids (number of points)
par.Ndelta = 50
par.Nm = 20_000
par.m_mid = 120.0
par.m_max = 100_000.0
par.Na = 10_000
par.a_mid = par.m_mid
par.a_max = par.m_max
# e. misc
par.max_iter = 10_000
par.tol = 1e-6
par.do_print = True
def allocate(self):
""" allocate model, i.e. create grids and allocate solution and simluation arrays """
self.setup_grids()
self._solve_prep()
def setup_grids(self):
""" construct grids for states and shocks """
par = self.par
# a. states (unequally spaced grids)
par.grid_delta = np.zeros(par.Ndelta)
if par.Ndelta > 1:
par.grid_delta[1] = 1e-4
par.grid_delta[2:] = nonlinspace(2*1e-4,0.1,par.Ndelta-2,1.3)
m_min = 0
if np.isnan(par.m_mid):
par.grid_m = nonlinspace(m_min,par.m_max,par.Nm,1.2)
else:
Nm_base = par.Nm//2
par.grid_m = np.zeros(par.Nm)
par.grid_m[:Nm_base] = nonlinspace(m_min,par.m_mid,Nm_base,1.1)
par.grid_m[Nm_base-1:] = nonlinspace(par.m_mid,par.m_max,par.Nm-Nm_base+1,1.1)
a_min = m_min+1e-6
if np.isnan(par.a_mid):
par.grid_a = nonlinspace(a_min,par.a_max,par.Na,1.2)
else:
Na_base = par.Na//2
par.grid_a = np.zeros(par.Na)
par.grid_a[:Na_base] = nonlinspace(a_min,par.a_mid,Na_base,1.1)
par.grid_a[Na_base-1:] = nonlinspace(par.a_mid,par.a_max,par.Na-Na_base+1,1.1)
# b. shocks (quadrature nodes and weights using Gauss-Hermite)
par.Nxi = 1 if par.sigma_xi == 0 else par.Nxi
par.Npsi = 1 if par.sigma_psi == 0 else par.Npsi
shocks = create_PT_shocks(par.sigma_psi,par.Npsi,par.sigma_xi,par.Nxi,par.pi,par.mu)
par.psi,par.psi_w,par.xi,par.xi_w,par.Nshocks = shocks
# c. perfect foresight
par.MPC_PF = 1-(par.R*par.beta)**(1/par.rho)/par.R
par.MPCP_PF = par.MPC_PF/(1-1/par.R)
par.MPCF_PF = np.zeros(par.Ntau+1)
for tau in range(par.Ntau+1):
par.MPCF_PF[tau] = par.R**(-tau)*par.MPC_PF/(1-par.zeta/par.R)
#########
# solve #
#########
def _solve_prep(self):
""" allocate memory for solution """
par = self.par
sol = self.sol
sol.c = np.zeros((par.Ntau+1,par.Ndelta,par.Nm))
sol.q = np.zeros((par.Ntau+1,par.Ndelta,par.Na))
def solve(self):
par = self.par
# a. solve directly
if par.Ndelta == 1:
if par.do_print: print('solving full model\n')
self.solve_with_c0()
# b. solve in two steps
else:
# i. without delta dimension
if par.do_print: print('solving model with Ndelta=1 and Ntau=1\n')
Ntau = par.Ntau
Ndelta = par.Ndelta
tol = par.tol
Npsi = par.Npsi
Nxi = par.Nxi
par.Ntau = 1
par.Ndelta = 1
par.tol = 1e-8
par.Npsi = 3
par.Nxi = 3
self.setup_grids()
self.solve_with_c0()
c0 = self.sol.c[0,:,:].copy()
for idelta in range(par.Ndelta):
c0[idelta,:] += par.MPC_PF*1/(1-par.zeta*1/par.R)*par.grid_delta[idelta]
par.Ntau = Ntau
par.Ndelta = Ndelta
par.tol = tol
par.Npsi = Npsi
par.Nxi = Nxi
self.setup_grids()
if par.do_print: print('')
# ii. full
if par.do_print: print('solving full model\n')
self.solve_with_c0(c0=c0)
def solve_with_c0(self,c0=np.array([])):
""" solve the model using egm and the initial guess in c0 """
# a. allocate solution
self._solve_prep()
# b. backwards induction until convergence
with jit(self) as model:
par = model.par
sol = model.sol
it = 0
max_abs_diff = np.inf
while it < par.max_iter:
# i. first iteration
if it == 0:
tic = time.time()
if c0.size == 0:
for idelta in range(par.Ndelta):
b = par.grid_m-1
h = 1/(1-par.G/par.R) + 1/(1-par.zeta*1/par.R)*par.grid_delta[idelta]
sol.c[0,idelta,:] = np.fmin(par.MPC_PF*(b+h),par.grid_m)
elif c0.size:
sol.c[0,:,:] = c0
# ii. all other iterations
else:
# o. compute post-decision functions
post_decision.compute_q(sol,par,0,0)
# oo. solve bellman equation
egm.solve_bellman(sol,par,0)
# iii. check convergence
if it > 0:
max_abs_diff = np.max(np.abs(sol.c[0]-c_old))
# iv. save old consumption function
c_old = sol.c[0].copy()
# v. print
toc = time.time()
if it > 0 and it%50 == 0 and par.do_print:
print(f'{it:4d}: max_abs_diff = {max_abs_diff:12.8f} (elapsed: {toc-tic:5.1f} secs)')
if max_abs_diff < par.tol:
print('-> convergence achieved')
break
it += 1
# c. backwards induction through the anticipation horizon
for itau in range(1,par.Ntau+1):
# a. compute post-decision functions
post_decision.compute_q(sol,par,itau,itau-1)
# b. solve bellman equation
egm.solve_bellman(sol,par,itau)
############
# simulate #
############
def simulate(self,simN,simT,m0s,tau0,delta0,seed=1917,postfix=''):
with jit(self) as model:
par = model.par
sol = model.sol
# a. allocate
shape =(m0s.size,simT,simN)
m = np.zeros(shape)
delta = np.zeros(shape)
c = np.zeros(shape)
P = np.zeros(shape)
C = np.zeros(shape)
# b. draw random
np.random.seed(seed) # seed the RNG with the seed argument
psi = np.exp(np.random.normal(loc=-0.5*par.sigma_psi**2,scale=par.sigma_psi,size=(simT,simN)))
xi = np.exp(np.random.normal(loc=-0.5*par.sigma_xi**2,scale=par.sigma_xi,size=(simT,simN)))
# c. simulate
simulate.simulate(simT,par,sol,m,delta,c,P,C,m0s,tau0,delta0,psi,xi)
return m,c,C
```
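A hedged usage sketch for the two modules above; it assumes the `consav` dependencies in the imports are installed and that `ModelClass` accepts a `name` argument, and it is not taken from the repository's own notebooks:

```python
# Sketch only: build, solve and plot the MPCF model defined above.
from MPCFModel import MPCFClass
import figs

model = MPCFClass(name='baseline')   # assumption: ModelClass.__init__ takes a name
model.solve()                        # EGM iteration to convergence, then roll back over tau
figs.cfunc(model)                    # consumption function by extra-income level delta
figs.MPC(model)                      # marginal propensity to consume
figs.MPCF(model)                     # MPC out of a future cash-flow, by horizon tau
```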
|
{
"source": "jeppefm1/Parkering",
"score": 3
}
|
#### File: Parkering/Nummerpladegenkendelse/findPlates.py
```python
import cv2
import numpy as np
import math
import numberplateRec
import imageProcess
import findChars
import classPossiblePlate
import classPossibleChar
# Constants describing the characteristics of a number plate
PLATE_WIDTH_PADDING_FACTOR = 1.3
PLATE_HEIGHT_PADDING_FACTOR = 1.5
def findPlatesInImg(imgCaptured):
# Empty list for storing possible number plates
listOfPossiblePlates = []
# Get the image dimensions
height, width, numChannels = imgCaptured.shape
# Create empty arrays for images and contours
imgGrayscaleScene = np.zeros((height, width, 1), np.uint8)
imgThreshScene = np.zeros((height, width, 1), np.uint8)
imgContours = np.zeros((height, width, 3), np.uint8)
# Use our own function to grayscale and threshold the image
imgGrayscaleScene, imgThreshScene = imageProcess.preprocessImg(imgCaptured)
# Use our own function to find possible letters and digits in the image
listOfPossibleCharsInScene = findPotentialCharsInIMG(imgThreshScene)
# Use the list of all potential letters or digits to group them with others that look similar.
listOfListsOfMatchingCharsInScene = findChars.findListOfListsOfMatchingChars(listOfPossibleCharsInScene)
# Then try to recognize a number plate from each group.
for listOfMatchingChars in listOfListsOfMatchingCharsInScene:
possiblePlate = extractPlate(imgCaptured, listOfMatchingChars)
# Store the potential number plates in a list
if possiblePlate.imgPlate is not None:
listOfPossiblePlates.append(possiblePlate)
# Print the number of plates found
print("\n" + str(len(listOfPossiblePlates)) + " mulige nummerplader fundet")
return listOfPossiblePlates
def findPotentialCharsInIMG(imgThresh):
# Create a list for storing chars, plus a variable counting the number of chars
listOfPossibleChars = []
countOfPossibleChars = 0
# Copy the image, since the search will modify it.
imgThreshCopy = imgThresh.copy()
# Use cv2 to find contours in the image
contours, npaHierarchy = cv2.findContours(imgThreshCopy, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Create an empty array for the contours
height, width = imgThresh.shape
imgContours = np.zeros((height, width, 3), np.uint8)
# Loop through the contours
for i in range(0, len(contours)):
# Create a new object for storing the potential char
possibleChar = classPossibleChar.PossibleChar(contours[i])
# If the contour satisfies the rules for a char, store the object in a list.
if findChars.checkIfPossibleChar(possibleChar):
countOfPossibleChars = countOfPossibleChars + 1
listOfPossibleChars.append(possibleChar)
return listOfPossibleChars
def extractPlate(imgCaptured, listOfMatchingChars):
# Create a number plate object
possiblePlate = classPossiblePlate.PossiblePlate()
# Sort the found characters by x position, so the list runs from left to right.
# The lambda keyword can be used to define a function in Python.
# Here it means that key is set to the x value, which is therefore what the sort uses.
listOfMatchingChars.sort(key = lambda matchingChar: matchingChar.centerX)
# Find the centre point of the number plate
plateCenterX = (listOfMatchingChars[0].centerX + listOfMatchingChars[len(listOfMatchingChars) - 1].centerX) / 2.0
plateCenterY = (listOfMatchingChars[0].centerY + listOfMatchingChars[len(listOfMatchingChars) - 1].centerY) / 2.0
# Store the centre in a tuple
plateCenter = plateCenterX, plateCenterY
# Determine the plate's dimensions, i.e. height and width.
# The width is found from the x corner point; the width is then used to find the other corner point.
# The width padding constant is then used to adjust for the plate border.
plateWidth = int((listOfMatchingChars[len(listOfMatchingChars) - 1].boundingRectX + listOfMatchingChars[len(listOfMatchingChars) - 1].boundingRectWidth - listOfMatchingChars[0].boundingRectX) * PLATE_WIDTH_PADDING_FACTOR)
# The plate height is then found as the average of all the character heights.
# Here too a constant is used to correct the plate height.
totalCharHeights = 0
for matchingChar in listOfMatchingChars:
totalCharHeights = totalCharHeights + matchingChar.boundingRectHeight
averageCharHeight = totalCharHeights / len(listOfMatchingChars)
plateHeight = int(averageCharHeight * PLATE_HEIGHT_PADDING_FACTOR)
# Determine the angle the number plate is rotated by.
# The sine relation is used for this.
opposite = listOfMatchingChars[len(listOfMatchingChars) - 1].centerY - listOfMatchingChars[0].centerY
hypotenuse = findChars.distanceBetweenCharsFunction(listOfMatchingChars[0], listOfMatchingChars[len(listOfMatchingChars) - 1])
correctionAngleRad = math.asin(opposite / hypotenuse)
correctionAngleDeg = correctionAngleRad * (180.0 / math.pi)
# Store the information in the class
possiblePlate.locationInImg = (tuple(plateCenter), (plateWidth, plateHeight), correctionAngleDeg)
# Then create a rotation matrix for rotating the number plate.
# The computed angle is used for this.
#Syntax: cv2.getRotationMatrix2D(center, angle, scale)
rotationMatrix = cv2.getRotationMatrix2D(tuple(plateCenter), correctionAngleDeg, 1.0)
# The created matrix can then be used to rotate the whole image
# The image dimensions are needed for this.
height, width, numChannels = imgCaptured.shape
imgRotated = cv2.warpAffine(imgCaptured, rotationMatrix, (width, height))
# Crop the image so that only the number plate remains
imgCropped = cv2.getRectSubPix(imgRotated, (plateWidth, plateHeight), tuple(plateCenter))
# Store the number plate in the class.
possiblePlate.imgPlate = imgCropped
# Return the class
return possiblePlate
```
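The deskewing inside `extractPlate` is the part that is easiest to get wrong, so here is a minimal standalone sketch of just that step, using a placeholder image and made-up plate geometry rather than repository data:

```python
# Sketch only: rotate the frame by the estimated plate angle, then crop the plate.
import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)      # placeholder frame
plate_center = (320.0, 240.0)                # hypothetical centre, size and angle
plate_size = (180, 40)
angle_deg = 4.5

M = cv2.getRotationMatrix2D(plate_center, angle_deg, 1.0)
h, w = img.shape[:2]
rotated = cv2.warpAffine(img, M, (w, h))
plate = cv2.getRectSubPix(rotated, plate_size, plate_center)
```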
#### File: Parkering/Nummerpladegenkendelse/imageProcess.py
```python
import cv2
import numpy as np
import math
# Constants for the image processing
FILTER_SIZE_GAUSSIAN = (5,5)
# Threshold constants - must be odd
# Controls how the image is simplified into a binary image
THRESSHOLD_BLOCKSIZE = 21
THESSHOLD_WEIGHT = 11
def preprocessImg(imgUnprocessed):
# Use our own function to grayscale the image via the HSV method - described in more detail in the function.
imgGrayscaled = HSVGrayscale(imgUnprocessed)
# Use our own function to remove noise from subject and background. The method is described in more detail in the function.
imgDenoised = removeNoise(imgGrayscaled)
# Get the image dimensions
height, width = imgGrayscaled.shape
# Blur the image; first create an empty array for it.
imgBlurred = np.zeros((height, width, 1), np.uint8)
# Use a Gaussian function for the blurring.
imgBlurred = cv2.GaussianBlur(imgDenoised, FILTER_SIZE_GAUSSIAN, 0)
# Threshold the image - meaning the image is simplified into a binary image.
# This attempts to make the image black and white.
# Uses an adaptive function in cv2 that inspects the image and adapts the black/white conversion accordingly.
imgThressholded = cv2.adaptiveThreshold(imgBlurred, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, THRESSHOLD_BLOCKSIZE, THESSHOLD_WEIGHT)
return imgGrayscaled, imgThressholded
def HSVGrayscale(imgUnprocessed):
# An image can also be described as HSV (hue, saturation, value), an alternative to RGB.
# One of the channels describes the image as grayscale, so it can be extracted by the transformation.
# Get the dimensions of the image
height, width, numChannels = imgUnprocessed.shape
# Create an empty array for the transformed image
imgHSV = np.zeros((height, width, 3), np.uint8)
# Transform the image to HSV
imgHSV = cv2.cvtColor(imgUnprocessed, cv2.COLOR_BGR2HSV)
# Store the values in three variables
imgHue, imgSaturation, imgValue = cv2.split(imgHSV)
# Only the value channel, which corresponds to grayscale, is needed
return imgValue
def removeNoise(imgGrayscaled):
# The function performs several morphological transformations intended to remove noise.
#https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
#https://www.youtube.com/watch?v=YA5u2PI3hF0
# There are different types:
# Opening removes false positives in the background
# Closing removes false negatives in the subject
# Tophat - the difference between the input and the opening of the input image
# Blackhat - the difference between the input and the closing of the input image
height, width = imgGrayscaled.shape
# Empty numpy arrays for tophat and blackhat
imgTopHat = np.zeros((height, width, 1), np.uint8)
imgBlackHat = np.zeros((height, width, 1), np.uint8)
# Shape used for the transformations. A rectangle is used here.
structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
# Perform the tophat and blackhat transformations - see the explanation above.
imgTopHat = cv2.morphologyEx(imgGrayscaled, cv2.MORPH_TOPHAT, structuringElement)
imgBlackHat = cv2.morphologyEx(imgGrayscaled, cv2.MORPH_BLACKHAT, structuringElement)
# Combine the images - grayscale + tophat - blackhat
imgGrayscalePlusTopHat = cv2.add(imgGrayscaled, imgTopHat)
imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)
return imgGrayscalePlusTopHatMinusBlackHat
```
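The same preprocessing chain as `preprocessImg`, written out as a flat script against a file on disk (the filename is a placeholder), may make the order of operations clearer:

```python
# Sketch only: HSV value channel -> tophat/blackhat contrast boost -> blur -> adaptive threshold.
import cv2

img = cv2.imread("car.jpg")                                    # placeholder input
_, _, value = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))  # value channel ~ grayscale

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
tophat = cv2.morphologyEx(value, cv2.MORPH_TOPHAT, kernel)
blackhat = cv2.morphologyEx(value, cv2.MORPH_BLACKHAT, kernel)
contrast = cv2.subtract(cv2.add(value, tophat), blackhat)

blurred = cv2.GaussianBlur(contrast, (5, 5), 0)
thresh = cv2.adaptiveThreshold(blurred, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY_INV, 21, 11)
```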
#### File: Parkering/Nummerpladegenkendelse/numberplateRec.py
```python
import cv2
import numpy as np
import os
import findChars
import findPlates
import classPossiblePlate
DISPLAY_PLATE = True
COLOR_YELLOW = (0.0, 255.0, 255.0)
COLOR_GREEN = (0.0, 255.0, 0.0)
COLOR_RED = (0.0, 0.0, 255.0)
licPlate = 0
licPlate = " "
def main(IMAGE):
# Load the image
image = cv2.imread(IMAGE)
# Use our own functions to find possible number plates and chars
listOfPossiblePlates = findPlates.findPlatesInImg(image)
listOfPossiblePlates = findChars.detectCharsInPlates(listOfPossiblePlates)
if(DISPLAY_PLATE == True):
# Show the image
cv2.imshow("Billede", image)
# Check whether any number plates were found
if len(listOfPossiblePlates) != 0:
# Sort the possible number plates by length, longest plate first.
# A lambda function that returns the length of the plate is used as the sort key
listOfPossiblePlates.sort(key = lambda possiblePlate: len(possiblePlate.charsInPlate), reverse = True)
# Assume the longest number plate is the correct one
licPlate = listOfPossiblePlates[0]
if(DISPLAY_PLATE == True):
# Show the number plate and the thresholded image
cv2.imshow("imgPlate", licPlate.imgPlate)
cv2.imshow("imgThresh", licPlate.imgThressholded)
# If no chars were recognized
if len(licPlate.charsInPlate) == 0:
print("\nIngen chars blev opdaget\n\n")
return
if(DISPLAY_PLATE == True):
# Draw a rectangle around the number plate
drawRedRectangleAroundPlate(image, licPlate)
# Print the number plate
print("\n Nummerplade genkendt i billede = " + licPlate.charsInPlate + "\n") # write license plate text to std out
# Show the number plate on the image
writeLicensePlateCharsOnImage(image, licPlate)
cv2.imshow("Nummerplade genkendt", image)
# Save the image
cv2.imwrite("nummerpladeGenkendt.png", image)
cv2.waitKey(0)
return licPlate.charsInPlate
# Function for drawing a red rectangle around the number plate
def drawRedRectangleAroundPlate(originalScene, licPlate):
centerOfTextAreaX = 0
centerOfTextAreaY = 0
lowerLeftTextOriginX = 0
lowerLeftTextOriginY = 0
# Find the corner points
rectPoints = cv2.boxPoints(licPlate.locationInImg)
cv2.line(originalScene, tuple(rectPoints[0]), tuple(rectPoints[1]), COLOR_RED, 2)
cv2.line(originalScene, tuple(rectPoints[1]), tuple(rectPoints[2]), COLOR_RED, 2)
cv2.line(originalScene, tuple(rectPoints[2]), tuple(rectPoints[3]), COLOR_RED, 2)
cv2.line(originalScene, tuple(rectPoints[3]), tuple(rectPoints[0]), COLOR_RED, 2)
def writeLicensePlateCharsOnImage(image, licPlate):
# Get information about the image
sceneHeight, sceneWidth, sceneNumChannels = image.shape
plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
# Settings for the font face and size
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = float(plateHeight) / 30.0
fontThickness = int(round(fontScale * 1.5))
textSize, baseline = cv2.getTextSize(licPlate.charsInPlate, fontFace, fontScale, fontThickness)
# Get information about the position in the image
((plateCenterX, plateCenterY), (plateWidth, plateHeight), correctionAngleInDeg) = licPlate.locationInImg
# Convert to integer
plateCenterX = int(plateCenterX)
plateCenterY = int(plateCenterY)
centerOfTextAreaX = int(plateCenterX)
# Decide whether the text should go below or above the number plate
if plateCenterY < (sceneHeight * 0.75):
centerOfTextAreaY = int(round(plateCenterY)) + int(round(plateHeight * 1.6))
else:
centerOfTextAreaY = int(round(plateCenterY)) - int(round(plateHeight * 1.6))
textSizeWidth, textSizeHeight = textSize
lowerLeftTextOriginX = int(centerOfTextAreaX - (textSizeWidth / 2))
lowerLeftTextOriginY = int(centerOfTextAreaY + (textSizeHeight / 2))
# Draw the text on the image
cv2.putText(image, licPlate.charsInPlate, (lowerLeftTextOriginX, lowerLeftTextOriginY), fontFace, fontScale, COLOR_YELLOW, fontThickness)
```
#### File: main/templatetags/app_filters.py
```python
from django import template
from datetime import date, timedelta
register = template.Library()
@register.filter(name='multiply')
def multiply(value1, value2):
return round(value1*value2)
```
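A short usage note for the filter above. The template snippet is the usual Django pattern and assumes `main` is an installed app; the variable names are made up.

```python
# Sketch only. In a template:
#   {% load app_filters %}
#   {{ hours|multiply:rate }}   {# renders round(hours*rate) #}
#
# Called directly in Python:
from main.templatetags.app_filters import multiply

assert multiply(2.5, 3) == 8   # Python 3 banker's rounding: round(7.5) == 8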
|
{
"source": "JeppeKlitgaard/Archimedes",
"score": 2
}
|
#### File: Archimedes/archimedes/archimedes.py
```python
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from doit.doit_cmd import DoitMain
from archimedes.author import Author
from archimedes.config import SiteConfig
class ArchimedesSite:
doit_main: DoitMain | None = None
def __init__(
self,
site_config: SiteConfig,
project_active: bool,
) -> None:
self.site_config = site_config
self.project_active = project_active
def resolve_author(self, ident: str) -> Author:
pass
```
#### File: archimedes/utils/path.py
```python
from collections.abc import Generator, Iterable
from pathlib import Path
# https://stackoverflow.com/questions/6639394/what-is-the-python-way-to-walk-a-directory-tree
def walk_dirs(head_dirs: Iterable[Path]) -> Generator[Path, None, None]:
"""
Walks the subdirectories and files of the given directories.
Args:
    head_dirs: Directories to walk from.
"""
for head_dir in head_dirs:
for p in head_dir.iterdir():
if p.is_dir():
yield from walk_dirs([p])
continue
yield p.resolve()
```
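A minimal usage sketch of `walk_dirs`; the directory names are placeholders:

```python
# Sketch only: list every file below two root directories as resolved paths.
from pathlib import Path

from archimedes.utils.path import walk_dirs

for file_path in walk_dirs([Path("content"), Path("assets")]):
    print(file_path)
```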
#### File: archimedes/utils/version.py
```python
from pathlib import Path
import toml
from semantic_version import Version
def get_version() -> Version:
"""
Returns the version of Archimedes.
"""
main_path = Path(__file__).resolve().parents[2] # /archimedes/
# Note this depends on where this module is located within Archimedes
pyproject = toml.load(main_path / "pyproject.toml")
return pyproject["tool"]["poetry"]["version"]
```
|
{
"source": "JeppeKlitgaard/barista",
"score": 2
}
|
#### File: barista/tests/test_verify.py
```python
import pytest
from barista.models import Match
def test_both_trigger_and_triggers():
with pytest.raises(ValueError):
Match.parse_obj(
{
"replace": "asd",
"trigger": "asd",
"triggers": ["asd", "abc"],
}
)
def test_neither_trigger_or_triggers():
with pytest.raises(ValueError):
Match.parse_obj({"replace": "asd"})
def test_trigger():
Match.parse_obj(
{
"replace": "asd",
"trigger": "ads",
}
)
def test_triggers():
Match.parse_obj(
{
"replace": "asd",
"triggers": ["ads", "ads2"],
}
)
```
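The tests above pin down an exclusive-or rule: a `Match` must have exactly one of `trigger` or `triggers`. The sketch below is an assumption about how such a rule could be written with pydantic v1 (`parse_obj` is the v1 API); it is not barista's actual model.

```python
# Sketch only (assumed implementation, not barista's): exclusive-or of trigger/triggers.
from typing import List, Optional

from pydantic import BaseModel, root_validator


class MatchSketch(BaseModel):
    replace: str
    trigger: Optional[str] = None
    triggers: Optional[List[str]] = None

    @root_validator
    def _trigger_xor_triggers(cls, values):
        has_one = values.get("trigger") is not None
        has_many = values.get("triggers") is not None
        if has_one == has_many:  # both given, or neither given
            raise ValueError("exactly one of 'trigger' or 'triggers' must be set")
        return values
```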
|
{
"source": "JeppeKlitgaard/DDMTools",
"score": 3
}
|
#### File: ddmtools/utils/number.py
```python
from typing import Tuple
import numpy as np
def log_spaced(max_num: int, points_per_decate: int = 15) -> np.ndarray:
"""Generate an array of log spaced integers smaller than L"""
decades = np.log10(max_num)
series: np.ndarray = np.unique(
np.logspace(
start=0,
stop=decades,
num=int(decades * points_per_decate),
base=10,
endpoint=False,
).astype(int)
)
return series
def get_centre_matrix(big_matrix: np.ndarray, small_matrix_shape: Tuple[int, int]) -> np.ndarray:
assert big_matrix.ndim == 2
big_center = np.array(big_matrix.shape) // 2
corner = big_center - np.array(small_matrix_shape) // 2
aa = corner[0]
ba = corner[1]
ab = corner[0] + small_matrix_shape[0]
bb = corner[1] + small_matrix_shape[1]
small_matrix: np.ndarray = big_matrix[aa:ab, ba:bb]
return small_matrix
```
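A quick usage sketch of the two helpers above (note the parameter is spelled `points_per_decate` in the source):

```python
# Sketch only: log-spaced lag integers and a central sub-matrix.
import numpy as np

from ddmtools.utils.number import get_centre_matrix, log_spaced

print(log_spaced(1000, points_per_decate=5))   # unique log-spaced integers below 1000
big = np.arange(100).reshape(10, 10)
print(get_centre_matrix(big, (4, 4)))          # central 4x4 block of the 10x10 matrix
```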
#### File: ddmtools/utils/progress.py
```python
from typing import Any, Optional
from joblib import Parallel
from tqdm.auto import tqdm
# https://stackoverflow.com/a/61900501/5036246
class ProgressParallel(Parallel): # type: ignore
def __init__(
self, use_tqdm: bool = True, total: Optional[int] = None, *args: Any, **kwargs: Any
):
self._use_tqdm = use_tqdm
self._total = total
super().__init__(*args, **kwargs)
def __call__(self, *args: Any, **kwargs: Any) -> Any:
with tqdm(disable=not self._use_tqdm, total=self._total) as self._pbar:
return Parallel.__call__(self, *args, **kwargs)
def print_progress(self) -> None:
if self._total is None:
self._pbar.total = self.n_dispatched_tasks
self._pbar.n = self.n_completed_tasks
self._pbar.refresh()
```
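Usage sketch for `ProgressParallel` as a drop-in for `joblib.Parallel`; the workload is a toy example:

```python
# Sketch only: run 100 toy tasks on 2 workers with a tqdm progress bar.
from math import sqrt

from joblib import delayed

from ddmtools.utils.progress import ProgressParallel

results = ProgressParallel(n_jobs=2, total=100)(delayed(sqrt)(i) for i in range(100))
```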
#### File: ddmtools/utils/string.py
```python
def removeprefix(string: str, prefix: str) -> str:
if string.startswith(prefix):
return string[len(prefix) :]
return string
```
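And the corresponding one-liner check for `removeprefix`, a backport of `str.removeprefix` from Python 3.9 (the filenames are made up):

```python
# Sketch only: prefix stripped when present, string returned unchanged otherwise.
from ddmtools.utils.string import removeprefix

assert removeprefix("ddm_frame_001.tif", "ddm_") == "frame_001.tif"
assert removeprefix("frame_001.tif", "ddm_") == "frame_001.tif"
```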
|
{
"source": "JeppeKlitgaard/DevIconsLatex",
"score": 3
}
|
#### File: JeppeKlitgaard/DevIconsLatex/compile.py
```python
import json
from pathlib import Path
from dataclasses import dataclass
from string import digits
ICOMOON_JSON_PATH = Path("devicons") / "icomoon.json"
TEMPLATE_STY_HEAD = r"""%% Generated by github.com/JeppeKlitgaard/DevIconsLatex
% Identify this package.
\NeedsTeXFormat{LaTeX2e}
\ProvidesPackage{devicon}[Provides some DevIcons]
% Requirements to use.
\usepackage{fontspec}
% Define shortcut to load the devicon
\newfontfamily{\DI}{devicon}
% Generic command displaying an icon by its name.
\newcommand*{\devicon}[1]{{
\DI\csname devicon@#1\endcsname
}}
"""
TEMPLATE_STY_TAIL = r"""
\endinput
"""
def remove_numbers(input: str) -> str:
remove_digits = str.maketrans("", "", digits)
return input.translate(remove_digits)
@dataclass
class Icon:
name: str
code: int
def code_as_latex_hex(self) -> str:
return '"' + hex(self.code)[2:].upper()
def name_as_latex_name(self) -> str:
name = "di"
name += remove_numbers("".join([x.title() for x in self.name.split("-")]))
return name
def to_latex_command(self) -> str:
command = r"\expandafter\def\csname devicon@"
command += self.name
command += r"\endcsname { \symbol{"
command += self.code_as_latex_hex()
command += "}} \def\\"
command += self.name_as_latex_name()
command += r" {{\DI\csname devicon@"
command += self.name
command += r"\endcsname}}"
return command
icons: list[Icon] = []
with open(ICOMOON_JSON_PATH) as f:
o = json.load(f)
for icon in o["icons"]:
i = Icon(icon["properties"]["name"], icon["properties"]["code"])
icons.append(i)
with open("devicons.sty", "w") as f:
f.write(TEMPLATE_STY_HEAD)
for icon in icons:
f.write(icon.to_latex_command() + "\n")
f.write(TEMPLATE_STY_TAIL)
```
|
{
"source": "jeppeliisberg/certbot-heroku",
"score": 3
}
|
#### File: certbot-heroku/certbot_heroku/configurator.py
```python
import argparse
import collections
import logging
import os
import subprocess
import time
import zope.component
import zope.interface
from acme import challenges
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
from certbot.plugins import common
logger = logging.getLogger(__name__)
DEV_NULL = open(os.devnull, 'w')
@zope.interface.implementer(interfaces.IAuthenticator, interfaces.IInstaller)
@zope.interface.provider(interfaces.IPluginFactory)
class HerokuConfigurator(common.Plugin):
"""Heroku configurator."""
description = "Heroku SSL"
MORE_INFO = """\
Plugin that performs hostname validation using Heroku by
setting a config var, then installs the generated certificate with
Heroku SSL. It expects that your Heroku app is already
configured to serve the proper response when it receives the ACME
challenge request, and that the Heroku CLI is already installed
and functional. See https://github.com/gboudreau/certbot-heroku for
detailed set-up instructions."""
def more_info(self): # pylint: disable=missing-docstring,no-self-use
return self.MORE_INFO
@classmethod
def add_parser_arguments(cls, add):
add("app", "-H", default=[], action=_HerokuAppAction,
help="The name of your Heroku app. This can be specified multiple "
"times to handle different domains; each domain will use "
"the Heroku app that preceded it. For instance: `-H "
"MyApp -d example.com -d www.example.com -H "
"MyAppDev -d dev.example.com` (default: Ask)")
add("configvar", default="LETS_ENCRYPT_CHALLENGE", action=_HerokuConfigVarAction,
help="The name of the Heroku config var that needs to be set "
"for your Heroku app to correctly answer the ACME challenge. "
"(default: LETS_ENCRYPT_CHALLENGE)")
def get_chall_pref(self, domain): # pragma: no cover
# pylint: disable=missing-docstring,no-self-use,unused-argument
return [challenges.HTTP01]
def __init__(self, *args, **kwargs):
super(HerokuConfigurator, self).__init__(*args, **kwargs)
if not hasattr(self.config, self.dest('map')):
setattr(self.config, self.dest('map'), {})
self.performed = collections.defaultdict(set)
def prepare(self): # pylint: disable=missing-docstring
pass
def perform(self, achalls): # pylint: disable=missing-docstring
domains = []
for achall in achalls:
domains.append(achall.domain)
self._set_heroku_apps(domains)
self._check_heroku_apps_map()
return [self._perform_single(achall) for achall in achalls]
def _set_heroku_apps(self, domains):
if self.conf("app"):
heroku_app = self.conf("app")[-1]
for domain in domains:
if domain not in self.conf("map"):
self.conf("map").setdefault(domain, heroku_app)
else:
for domain in domains:
if domain not in self.conf("map"):
new_heroku_app = self._prompt_for_heroku_app(domain)
self.conf("map")[domain] = new_heroku_app
def _prompt_for_heroku_app(self, domain):
heroku_app = None
while heroku_app is None:
heroku_app = self._prompt_for_new_app(domain)
return heroku_app
def _prompt_for_new_app(self, domain):
display = zope.component.getUtility(interfaces.IDisplay)
while True:
code, heroku_app = display.input(
"Input the Heroku app name for {0}:".format(domain),
force_interactive=True)
if code == display_util.HELP:
# Displaying help is not currently implemented
return None
elif code == display_util.CANCEL:
return None
else: # code == display_util.OK
try:
return _validate_app(heroku_app)
except errors.PluginError as error:
display.notification(str(error), pause=False)
def _check_heroku_apps_map(self):
if not self.conf("map"):
raise errors.PluginError(
"Missing parts of Heroku configuration; please set "
"-H and --domains. Run with --help heroku for examples.")
def _perform_single(self, achall):
response, validation = achall.response_and_validation()
heroku_app = self.conf("map")[achall.domain]
config_value = "{0}={1}".format(self.conf("configvar"), validation.encode())
logger.info("Using the Heroku app %s for domain %s", heroku_app, achall.domain)
try:
# Check if we need to add the custom domain to the Heroku app
ps = subprocess.Popen([_get_heroku_cli(), "domains", "-a", heroku_app], stdout=subprocess.PIPE)
subprocess.check_call(['grep', achall.domain], stdin=ps.stdout, stdout=DEV_NULL)
ps.wait()
except subprocess.CalledProcessError:
# Need to add domain to Heroku app
subprocess.call([_get_heroku_cli(), "domains:add", achall.domain, "-a", heroku_app], stdout=DEV_NULL)
ps = subprocess.Popen([_get_heroku_cli(), "domains", "-a", heroku_app], stdout=subprocess.PIPE)
output = subprocess.check_output(['grep', achall.domain], stdin=ps.stdout)
ps.wait()
dns_host = output.decode("utf-8").replace("{0} ".format(achall.domain), "").strip()
raise errors.PluginError(
"Error: Domain {0} was missing from Heroku app {1} custom domains.\n"
"It was added, but you will need to update your DNS configuration to "
"add a CNAME for {0} that points to {2}".format(achall.domain, heroku_app, dns_host))
try:
# Check if we need to disable preboot
ps = subprocess.Popen([_get_heroku_cli(), "features", "-a", heroku_app], stdout=subprocess.PIPE)
subprocess.check_call(['grep', "+.*preboot"], stdin=ps.stdout, stdout=DEV_NULL)
ps.wait()
preboot_was_enabled = True
logger.warning(" Disabling preboot feature")
subprocess.call([_get_heroku_cli(), "features:disable", "preboot", "-a", heroku_app], stdout=DEV_NULL)
except subprocess.CalledProcessError:
# preboot is not enabled; all is good
preboot_was_enabled = False
logger.info(" Saving ACME challenge response in config var")
try:
subprocess.check_call([_get_heroku_cli(), "config:set", config_value, "-a", heroku_app], stdout=DEV_NULL)
except subprocess.CalledProcessError:
raise errors.PluginError(
"Failed to use 'heroku config:set' to set the config var {0} "
"for the Heroku app named {1}. Make sure the Heroku CLI is installed, "
"and that running 'heroku info -a {1}' works.".format(config_value, heroku_app)
)
logger.info(" Waiting for web dynos to restart...")
while True:
time.sleep(5) # Need to wait until Heroku finished restarting the web dynos
try:
ps = subprocess.Popen([_get_heroku_cli(), "ps", "web", "-a", heroku_app], stdout=subprocess.PIPE)
subprocess.check_call(['grep', "starting"], stdin=ps.stdout, stdout=DEV_NULL)
ps.wait()
# Dynos are still restarting; continue waiting...
except subprocess.CalledProcessError:
# Dynos finished restarting; let the ACME server do its validation
break
if preboot_was_enabled:
logger.warning(" Re-enabling preboot feature")
subprocess.call([_get_heroku_cli(), "features:enable", "preboot", "-a", heroku_app], stdout=DEV_NULL)
self.performed[heroku_app].add(achall)
return response
def cleanup(self, achalls): # pylint: disable=missing-docstring
for achall in achalls:
heroku_app = self.conf("map")[achall.domain]
logger.info("Clearing ACME challenge response config var from '%s'", heroku_app)
subprocess.check_call([_get_heroku_cli(), "config:unset", self.conf("configvar"), "-a", heroku_app],
stdout=DEV_NULL)
#####
# Installer
#####
# Entry point in main.py for installing cert
def deploy_cert(self, domain, cert_path, key_path, chain_path=None, fullchain_path=None):
# pylint: disable=unused-argument
if domain not in self.conf("map"):
self._set_heroku_apps([domain])
heroku_app = self.conf("map")[domain]
try:
# Check if we need to add the custom domain to the Heroku app
ps = subprocess.Popen([_get_heroku_cli(), "domains", "-a", heroku_app], stdout=subprocess.PIPE)
subprocess.check_call(['grep', domain], stdin=ps.stdout, stdout=DEV_NULL)
ps.wait()
except subprocess.CalledProcessError:
# Need to add domain to Heroku app
logger.info("Adding domain %s to Heroku app %s", domain, heroku_app)
subprocess.call([_get_heroku_cli(), "domains:add", domain, "-a", heroku_app], stdout=DEV_NULL)
try:
# Check if we need to add or update the SSL cert
ps = subprocess.Popen([_get_heroku_cli(), "certs", "-a", heroku_app], stdout=subprocess.PIPE)
subprocess.check_call(['grep', domain], stdin=ps.stdout, stdout=DEV_NULL)
ps.wait()
# Cert found; i.e. need to update
logger.info("Updating existing Heroku SSL endpoint... ")
try:
subprocess.check_call(
[_get_heroku_cli(), "certs:update", fullchain_path, key_path, "-a", heroku_app, "--confirm",
heroku_app],
stdout=DEV_NULL)
except subprocess.CalledProcessError:
raise errors.PluginError("'heroku certs:update' command failed. See error above.")
except subprocess.CalledProcessError:
# Need to add SSL; it wasn't set up before
logger.info("Configuring new Heroku SSL endpoint... ")
try:
subprocess.check_call([_get_heroku_cli(), "certs:add", fullchain_path, key_path, "-a", heroku_app],
stdout=DEV_NULL)
except subprocess.CalledProcessError:
raise errors.PluginError("'heroku certs:add' command failed. See error above.")
def get_all_names(self):
all_names = set()
for domain, app in self.conf("map").items():
if domain not in all_names:
all_names.add(domain)
return all_names
def supported_enhancements(self): # pylint: disable=no-self-use
return []
def enhance(self, domain, enhancement, options=None):
return
def save(self, title=None, temporary=False):
return
def rollback_checkpoints(self, rollback=1):
return
def recovery_routine(self):
return
def view_config_changes(self):
return
def config_test(self):
return
def restart(self):
return
def get_all_certs_keys(self):
return set()
class _HerokuConfigVarAction(argparse.Action):
"""Action class for parsing heroku_config_var."""
def __call__(self, parser, namespace, heroku_config_var, option_string=None):
if heroku_config_var:
namespace.heroku_config_var = heroku_config_var
class _HerokuAppAction(argparse.Action):
"""Action class for parsing heroku_app."""
def __init__(self, *args, **kwargs):
super(_HerokuAppAction, self).__init__(*args, **kwargs)
self._domain_before_app = False
def __call__(self, parser, namespace, heroku_app, option_string=None):
if self._domain_before_app:
raise errors.PluginError(
"If you specify multiple Heroku apps, "
"one of them must precede all domain flags.")
if getattr(namespace, 'certbot_heroku:heroku_app'):
# Apply previous app to all matched
# domains before setting the new app
prev_app = getattr(namespace, 'certbot_heroku:heroku_app')[-1]
for domain in namespace.domains:
if 'certbot_heroku:heroku_map' not in namespace:
setattr(namespace, 'certbot_heroku:heroku_map', {})
getattr(namespace, 'certbot_heroku:heroku_map').setdefault(domain, prev_app)
elif namespace.domains:
self._domain_before_app = True
getattr(namespace, 'certbot_heroku:heroku_app').append(_validate_app(heroku_app))
def _validate_app(heroku_app):
"""Validates and returns the Heroku app name.
:param str heroku_app: name of the Heroku app
:returns: name of the Heroku app
:rtype: str
"""
try:
subprocess.check_call([_get_heroku_cli(), "info", "-a", heroku_app], stdout=DEV_NULL)
except subprocess.CalledProcessError:
raise errors.PluginError(
"No Heroku app named {0} was found. Make sure you have the Heroku "
"CLI installed, and that running 'heroku info -a {0}' works.".format(heroku_app)
)
return heroku_app
def _get_heroku_cli():
try:
# Try to locate the Heroku CLI on the PATH
heroku_cli = subprocess.check_output(["which", "heroku"], stderr=DEV_NULL).strip()
except subprocess.CalledProcessError:
# Looking for heroku CLI at the usual places
if os.path.isfile("/usr/local/heroku/bin/heroku"):
heroku_cli = "/usr/local/heroku/bin/heroku"
elif os.path.isfile("/usr/local/bin/heroku"):
heroku_cli = "/usr/local/bin/heroku"
else:
raise errors.PluginError("Error: can't find Heroku CLI")
return heroku_cli
```
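To make the `-H`/`-d` pairing concrete: with the flags from the plugin's own help text, `_HerokuAppAction` plus `_set_heroku_apps` end up building a domain-to-app map like the one below (an illustration, not plugin output):

```python
# Sketch only: resulting map for
#   -H MyApp -d example.com -d www.example.com -H MyAppDev -d dev.example.com
heroku_map = {
    "example.com": "MyApp",
    "www.example.com": "MyApp",
    "dev.example.com": "MyAppDev",
}
```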
|
{
"source": "JeppeLovstad/Bachelor-project-crypto",
"score": 3
}
|
#### File: JeppeLovstad/Bachelor-project-crypto/bla.py
```python
import numpy as np
import matplotlib.pyplot as plt
import os
delta_data = [] #x-axis in plot
time_data = [] #y-axis in plot
overall_time = []
overall_delta = []
subpltno = 111
def export_fig(name, fig):
my_path = os.path.join("", name)
fig.savefig(my_path)
with open('timeLog_goodstuff') as fp:
for line in fp:
old_d_7 = None
get_d6 = line.split(',')
delta_6 = get_d6[0] #we get delta_6 by seperating at the first comma and taking the first element
#then get delta_7 and plot vs its time in the plot
data = get_d6[1].split('-')
delta_7 = int(data[0])
delta_data.append(int(delta_6)) #add new delta_6-point ---- possibly value error of it thinks delta is a float
timepoint = float(data[1])
time_data.append(timepoint) #add corresponding time-point
if delta_7 != old_d_7:
#make new plot
fig = plt.figure(int(delta_7))
old_d_7 = delta_7
string = ''.join([str(delta_6), ",", str(delta_7)])
overall_delta.append(string)
overall_time.append(timepoint)
if int(delta_6) == 255:
plt.plot(delta_data,time_data, 'ro') #plot the data points - 'ro' means red dots
export_fig(str(delta_7), fig) #export the figure to .png - stolen from some ML-code from handin 1 (model_stats.py)
#old_d_7 = delta_7 #we are done - ready for next delta_6
delta_data = [] #reset data arrays
time_data = [] #ready for new data points
plt.plot(delta_data,time_data, 'ro') #plot the data points - 'ro' means red dots
export_fig(str(delta_7), fig) #export the figure to .png - stolen from some ML-code from handin 1 (model_stats.py)
last_fig = delta_7 +1
min_index = np.argmin(overall_time)
minimum = overall_time[min_index]
min_delta = overall_delta[min_index]
print("Minimum: ", min_delta, " : ", minimum)
average = np.average(overall_time)
median = np.median(overall_time)
allowed_error = 1.05
print("Average: ", average, "\n Median: ", median)
#overall_time[min_index] = np.inf
#sec_index = np.argmin(overall_time)
#sec_min = overall_time[sec_index]
#sec_delta = overall_delta[sec_index]
#print("Second smallest: ", sec_delta, " : ", sec_min)
#remove outliers:
clean_time = []
clean_delta = []
for i in range(len(overall_delta)):
if overall_time[i] <= average*allowed_error:
clean_time.append(overall_time[i])
clean_delta.append(overall_delta[i])
min_index = np.argmin(clean_time)
minimum = clean_time[min_index]
min_delta = clean_delta[min_index]
print("Without outliers")
print("Minimum: ", min_delta, " : ", minimum)
average = np.average(clean_time)
median = np.median(clean_time)
print("Average: ", average, "\n Median: ", median)
clean_delta_data = []
clean_time_data = []
with open('timeLog_goodstuff') as fp:
for line in fp:
old_d_7 = None
get_d6 = line.split(',')
delta_6 = get_d6[0] #we get delta_6 by seperating at the first comma and taking the first element
#then get delta_7 and plot vs its time in the plot
data = get_d6[1].split('-')
delta_7 = int(data[0])
timepoint = float(data[1])
if timepoint <= average*allowed_error:
clean_delta_data.append(int(delta_6)) #add new delta_6-point ---- possibly value error of it thinks delta is a float
clean_time_data.append(timepoint) #add corresponding time-point
if delta_7 != old_d_7:
#make new plot
fig = plt.figure(last_fig+delta_7)
old_d_7 = delta_7
if int(delta_6) == 255:
plt.plot(clean_delta_data,clean_time_data, 'go') #plot the data points - 'ro' means red dots
export_fig(str(delta_7)+ "_clean", fig) #export the figure to .png - stolen from some ML-code from handin 1 (model_stats.py)
#old_d_7 = delta_7 #we are done - ready for next delta_6
clean_delta_data = []
clean_time_data = []
plt.plot(delta_data,time_data, 'go') #plot the data points - 'ro' means red dots
export_fig(str(delta_7), fig) #export the figure to .png - stolen from some ML-code from handin 1 (model_stats.py)
```
|
{
"source": "jeppenodgaard/pyRFExplorer",
"score": 3
}
|
#### File: pyRFExplorer/pyrfe/plot.py
```python
import numpy as np
import matplotlib as mpl
mpl.use( 'TkAgg' )
font = {'family' : 'Arial',
'weight' : 'bold',
'size' : 8}
mpl.rc('font', **font)
import matplotlib.pyplot as plt
import matplotlib.image as img
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import Tkinter as tk
class LcdPlot( object ):
def __init__( self, rfe, frame ):
self.rfe = rfe
self.frame = frame
self.lcd_data = self.rfe.lcd_data
self.figure_lcd = mpl.figure.Figure( figsize=(2,1), dpi=100, frameon=False )
self.canvas_lcd = FigureCanvasTkAgg( self.figure_lcd, master=self.frame )
self.lcd_subplot = self.figure_lcd.add_subplot( '111' )
# style figure
self.figure_lcd.patch.set_alpha( 0.0 ) # makes background patch invisible
self.figure_lcd.patch.set_visible( False )
# style lcd
self.lcd_subplot.set_axis_off() # will show graph only, rest is transparent
self.img = self.lcd_subplot.matshow( np.random.random((64*2, 128*2)), cmap='Greys' ) # why does only random work, not zeros or ones?
#self.figure_lcd.tight_layout()
self.canvas_lcd.show()
self.canvas_lcd.get_tk_widget().grid()
def update( self ):
if not self.lcd_data.empty():
while not self.lcd_data.empty():
lcd = self.lcd_data.get()
lcd = np.kron( lcd, np.ones((2,2)) ) # scale by factor 2
self.img.set_array( lcd )
self.canvas_lcd.draw()
class SweepPlot( object ):
sweep_max = None
sweep_avg = None
sweep_min = None
sweep_fstart = 0
sweep_fstop = 0
sweep_flen = 0
def __init__( self, rfe, frame ):
self.rfe = rfe
self.frame = frame
self.sweep_data = self.rfe.sweep_data
self.figure = mpl.figure.Figure( figsize=(8,6), dpi=100 )
self.canvas = FigureCanvasTkAgg( self.figure, master=self.frame )
self.sweep_subplot = self.figure.add_subplot( '111' )
# style figure
self.figure.patch.set_color('black')
# style sweep
#self.sweep_subplot.patch.set_alpha( 0.0 ) # makes graph background transparent
self.sweep_subplot.patch.set_visible( False ) # makes graph background invisible
self.sweep_subplot.xaxis.grid( color='gray', linestyle='dashed', alpha=0.3 )
self.sweep_subplot.yaxis.grid( color='gray', linestyle='dashed', alpha=0.3 )
self.sweep_subplot.set_frame_on( False ) # remove border around graph
#self.sweep_subplot.spines['bottom'].set_color( 'white' ) # bottom border color (it's off)
#self.sweep_subplot.spines['top'].set_color( 'white' ) # top border color (it's off)
#self.sweep_subplot.xaxis.label.set_color( 'white' ) # label color (we have none)
self.sweep_subplot.tick_params( axis='x', colors='white' ) # ticks and tick labels
self.sweep_subplot.tick_params( axis='y', colors='white' ) # ticks and tick labels
self.sweep_subplot.xaxis.get_major_formatter().set_scientific( False )
self.figure.tight_layout()
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
def update( self ):
if not self.sweep_data.empty():
# flush queue if we're too slow
while not self.sweep_data.empty():
freq, db, minmax = self.sweep_data.get()
if len(freq) != len(db):
print 'freq/db length mismatch'
return
if (self.sweep_avg is None) or (freq[0]!=self.sweep_fstart) or (freq[-1]!=self.sweep_fstop) or (len(freq)!=self.sweep_flen):
self.sweep_max = db
self.sweep_avg = db
self.sweep_min = db
self.sweep_fstart = freq[0]
self.sweep_fstop = freq[-1]
self.sweep_flen = len(freq)
mm = (np.min(self.sweep_min),np.max(self.sweep_max))
self.sweep_subplot.set_ylim( mm )
xoff = (self.sweep_fstop - self.sweep_fstart)*0.05
self.sweep_subplot.set_xlim( (self.sweep_fstart-xoff, self.sweep_fstop+xoff) )
try:
self.pminmax.remove()
self.pavg.remove()
self.pdb.remove()
except:
pass
self.pminmax = self.sweep_subplot.fill_between( freq, self.sweep_min, self.sweep_max, facecolor='white', alpha=0.1, linewidth=0.2 )
self.pavg, = self.sweep_subplot.plot( freq, self.sweep_avg, color='red', alpha=0.8, linewidth=1.5 )
#self.pdb, = self.sweep_subplot.plot( freq, db, color='#666666', linewidth=0.0, marker='.', alpha=0.5 )
self.pdb = self.sweep_subplot.fill_between( freq, self.sweep_avg, db, facecolor='#666666', linewidth=0.2, alpha=0.3 )
else:
self.sweep_max = np.maximum(self.sweep_max, db)
self.sweep_min = np.minimum(self.sweep_min, db)
self.sweep_avg = self.sweep_avg*0.95+db*0.05
mm = (np.min(self.sweep_min),np.max(self.sweep_max))
self.sweep_subplot.set_ylim( mm )
#self.pminmax.set_xdata( freq )
#self.pminmax.set_ydata( self.sweep_min, self.sweep_max )
#self.pavg.set_xdata( freq )
#self.pavg.set_ydata( self.sweep_avg )
#self.pdb.set_xdata( freq )
#self.pdb.set_ydata( db )
self.pminmax.remove()
self.pminmax = self.sweep_subplot.fill_between( freq, self.sweep_min, self.sweep_max, facecolor='white', alpha=0.1, linewidth=0.2 )
self.pavg.remove()
self.pavg, = self.sweep_subplot.plot( freq, self.sweep_avg, color='red', alpha=0.8, linewidth=1.5 )
self.pdb.remove()
#self.pdb, = self.sweep_subplot.plot( freq, db, color='#666666', linewidth=0.0, marker='.', alpha=0.5 )
self.pdb = self.sweep_subplot.fill_between( freq, self.sweep_avg, db, facecolor='#666666', linewidth=0.2, alpha=0.3 )
self.canvas.draw()
```
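The sweep statistics kept by `SweepPlot.update` boil down to a max/min envelope plus an exponentially weighted average. A standalone sketch on fake sweeps (made-up dB values):

```python
# Sketch only: envelope and EWMA updates as in SweepPlot.update.
import numpy as np

sweep_max = sweep_min = sweep_avg = None
for _ in range(10):
    db = np.random.uniform(-110.0, -60.0, 112)    # fake sweep of 112 bins
    if sweep_avg is None:
        sweep_max = sweep_min = sweep_avg = db
    else:
        sweep_max = np.maximum(sweep_max, db)
        sweep_min = np.minimum(sweep_min, db)
        sweep_avg = sweep_avg*0.95 + db*0.05
```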
|
{
"source": "jepperaskdk/doctest",
"score": 3
}
|
#### File: pydoctest/reporters/text_reporter.py
```python
from pydoctest.configuration import Configuration, Verbosity
from pydoctest.reporters.reporter import Reporter
from pydoctest.validation import ClassValidationResult, FunctionValidationResult, ModuleValidationResult, ResultType, ValidationResult
SUCCESS = "OK"
FAILED = "FAIL"
SKIPPED = "SKIPPED"
class TextReporter(Reporter):
def get_output(self, result: ValidationResult) -> str:
"""Returns the text output by walking the ValidationResult object.
Args:
result (ValidationResult): The results from running Pydoctest.
Returns:
str: The output to be returned.
"""
output = ""
for module_result in result.module_results:
output += self.get_module_output(module_result)
return output
def get_module_output(self, result: ModuleValidationResult) -> str:
"""Returns the text output from the result object from walking the module.
Args:
result (ModuleValidationResult): The result from running Pydoctest on the module.
Returns:
str: The output of the module.
"""
output = ""
if result.fail_reason != "":
output = f"{result.fail_reason}\n"
for f_r in result.function_results:
output += self.get_function_output(f_r)
for c_r in result.class_results:
output += self.get_class_output(c_r)
return output
def get_function_output(self, result: FunctionValidationResult) -> str:
"""Returns the text output from the result object from the function.
Args:
result (FunctionValidationResult): The result from running Pydoctest on the function.
Returns:
str: The output from the function.
"""
if result.result == ResultType.OK:
if self.config.verbosity == Verbosity.SHOW_ALL:
return f"Function: {result.function} {SUCCESS}\n"
return ""
if result.result == ResultType.FAILED:
return f"Function: {result.function} {FAILED} | {result.fail_reason}\n"
if result.result == ResultType.NO_DOC and self.config.fail_on_missing_docstring:
return f"Function: {result.function} is missing a docstring\n"
return ""
def get_class_output(self, result: ClassValidationResult) -> str:
"""Returns the text output from the result object from the class.
Args:
result (ClassValidationResult): The result from running Pydoctest on the class.
Returns:
str: The output of the class and its functions.
"""
output = ""
for fn in result.function_results:
output += self.get_function_output(fn)
return output
```
#### File: tests/test_parsers/test_google_parser.py
```python
import pydoc
import pytest
from pydoctest.parsers.google_parser import GoogleParser
from pydoctest.exceptions import ParseException
import tests.test_class.incorrect_class
class TestGoogleParser():
    def test_parse_exception_get_parameters(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.incorrect_class.IncorrectTestClass.func_parse_exception)
        with pytest.raises(ParseException) as exc_info:
            parser.get_parameters(doc, tests.test_class.incorrect_class)

    def test_parse_exception_get_return_type(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.incorrect_class.IncorrectTestClass.func_parse_exception)
        with pytest.raises(ParseException) as exc_info:
            parser.get_return_type(doc, tests.test_class.incorrect_class)

    def test_get_exceptions_raised(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.incorrect_class.IncorrectTestClass.func_parse_exception)
        with pytest.raises(ParseException) as exc_info:
            parser.get_exceptions_raised(doc)

    def test_empty_func(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.empty_func)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 0, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == type(None), "GoogleParser failed assertion"

    def test_func_returns_none(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_returns_none)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 1, "GoogleParser failed assertion"
        assert arguments[0].type == int, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == type(None), "GoogleParser failed assertion"

    def test_func_returns_int(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_returns_int)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 0, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == int, "GoogleParser failed assertion"

    def test_func_has_arg_returns_arg(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_has_arg_returns_arg)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 1, "GoogleParser failed assertion"
        assert arguments[0].type == int, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == float, "GoogleParser failed assertion"

    def test_func_has_raises_doc(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_has_raises_doc)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 1, "GoogleParser failed assertion"
        assert arguments[0].type == int, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == int, "GoogleParser failed assertion"

    def test_func_with_multiline_summary(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_with_multiline_summary)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 1, "GoogleParser failed assertion"
        assert arguments[0].type == int, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == int, "GoogleParser failed assertion"

    def test_get_summary_multiline_summary(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_with_multiline_summary)
        summary = parser.get_summary(doc, tests.test_class.correct_class)
        assert summary is not None
        assert len(summary) > 0, "GoogleParser failed assertion"
        assert len([x for x in summary if x == '\n']) > 1, "GoogleParser failed assertion"

    def test_get_summary_empty_summary(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.correct_class.CorrectTestClass.func_no_summary)
        arguments = parser.get_parameters(doc, tests.test_class.correct_class)
        assert len(arguments) == 0, "GoogleParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_class.correct_class)
        assert return_type == type(None), "GoogleParser failed assertion"
        summary = parser.get_summary(doc, tests.test_class.correct_class)
        assert summary is None, "GoogleParser failed assertion"

    def test_func_with_raise_and_args_and_return(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.raises_class.RaisesClass.func_with_raise_and_args_and_return)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0

    def test_func_with_raise_and_args(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.raises_class.RaisesClass.func_with_raise_and_args)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0

    def test_func_with_raise(self) -> None:
        parser = GoogleParser()
        doc = pydoc.getdoc(tests.test_class.raises_class.RaisesClass.func_with_raise)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0
```
#### File: tests/test_parsers/test_numpy_parser.py
```python
import pydoc
from typing import Any, Dict

import pytest

from pydoctest.parsers.numpy_parser import NumpyParser
from pydoctest.exceptions import ParseException

import tests.test_parsers.numpy_class


class TestNumpyParser():
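    """Exercises NumpyParser against the NumPy-style docstrings in tests.test_parsers.numpy_class,
    covering parameters, return types, summaries, raised exceptions and generic types."""
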
    def test_parse_exception_get_parameters(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.IncorrectTestClass.func_parse_exception)
        with pytest.raises(ParseException) as exc_info:
            parser.get_parameters(doc, tests.test_parsers.numpy_class)

    def test_parse_exception_get_return_type(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.IncorrectTestClass.func_parse_exception)
        with pytest.raises(ParseException) as exc_info:
            parser.get_return_type(doc, tests.test_parsers.numpy_class)

    def test_empty_func(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.empty_func)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 0, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == type(None), "NumpyParser failed assertion"

    def test_func_returns_none(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_returns_none)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 1, "NumpyParser failed assertion"
        assert arguments[0].type == int, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == type(None), "NumpyParser failed assertion"

    def test_func_returns_int(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_returns_int)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 0, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == int, "NumpyParser failed assertion"

    def test_func_returns_int_name_type(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_returns_int_name_type)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 0, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == int, "NumpyParser failed assertion"

    def test_func_has_arg_returns_arg(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_has_arg_returns_arg)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 1, "NumpyParser failed assertion"
        assert arguments[0].type == int, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == float, "NumpyParser failed assertion"

    def test_func_has_raises_doc(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_has_raises_doc)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 1, "NumpyParser failed assertion"
        assert arguments[0].type == int, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == int, "NumpyParser failed assertion"

    def test_func_with_multiline_summary(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_with_multiline_summary)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 1, "NumpyParser failed assertion"
        assert arguments[0].type == int, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == int, "NumpyParser failed assertion"

    def test_get_summary_multiline_summary(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_with_multiline_summary)
        summary = parser.get_summary(doc, tests.test_parsers.numpy_class)
        assert summary is not None
        assert len(summary) > 0, "NumpyParser failed assertion"
        assert len([x for x in summary if x == '\n']) > 1, "NumpyParser failed assertion"

    def test_get_summary_empty_summary(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_no_summary)
        arguments = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(arguments) == 0, "NumpyParser failed assertion"
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == type(None), "NumpyParser failed assertion"
        summary = parser.get_summary(doc, tests.test_parsers.numpy_class)
        assert summary is None, "NumpyParser failed assertion"

    def test_func_with_raise_and_args_and_return(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.RaisesClass.func_with_raise_and_args_and_return)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0

    def test_func_with_raise_and_args(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.RaisesClass.func_with_raise_and_args)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0

        parameters = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(parameters) == 2
        assert parameters[0].name == 'a'
        assert parameters[0].type == int
        assert parameters[1].name == 'b'
        assert parameters[1].type == float

    def test_func_with_raise(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.RaisesClass.func_with_raise)
        actual_exceptions = parser.get_exceptions_raised(doc)
        expected_exceptions = ['RuntimeError', 'ValueError', 'IndexError']
        assert len(expected_exceptions) == len(actual_exceptions)
        intersection = set(expected_exceptions) - set(actual_exceptions)
        assert len(intersection) == 0

    def test_func_with_generics(self) -> None:
        parser = NumpyParser()
        doc = pydoc.getdoc(tests.test_parsers.numpy_class.CorrectTestClass.func_with_generics)
        parameters = parser.get_parameters(doc, tests.test_parsers.numpy_class)
        assert len(parameters) == 1
        assert parameters[0].type == Dict[str, Any]
        assert parameters[0].name == 'a_a'
        return_type = parser.get_return_type(doc, tests.test_parsers.numpy_class)
        assert return_type == Dict[str, Any]
```
|
{
"source": "jeppe-style/code-quality-and-popularity",
"score": 3
}
|
#### File: jeppe-style/code-quality-and-popularity/static_code_metrics.py
```python
import json
import os
import shutil
import subprocess

import git
import pandas
from git import Repo

from shared_constants import data_dir, repo_candidates_filename

temp_repo_dir = "temp-repo"
code_metrics_file = "code-metrics.csv"
code_metrics_folder = "code-metrics"

def read_json(filename):
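    """Load and return the JSON document stored at {data_dir}/{filename}.json."""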
print("reading result from {}/{}".format(data_dir, filename))
with open("{}/{}.json".format(data_dir, filename), "r") as file:
data = json.load(file)
return data
def main():
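    """Clone each candidate repository, run the ck metrics jar on its last snapshot,
    aggregate the per-repo metric statistics and write them all to a single CSV."""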
    # for all repos
    candidate_repos = read_json(repo_candidates_filename)

    # create the folder where to store the code metrics
    if not os.path.exists("{}/{}".format(data_dir, code_metrics_folder)):
        os.makedirs("{}/{}".format(data_dir, code_metrics_folder))

    metrics = None

    for i in range(0, len(candidate_repos)):
        # for i in range(0, 10):
        # create the folder where to store the repos temporarily
        if not os.path.exists(temp_repo_dir):
            os.makedirs(temp_repo_dir)

        candidate_repo = candidate_repos[i]

        # download repo
        git_url = candidate_repo["html_url"]
        repo_name = candidate_repo["name"]
        print("============================================")
        print("cloning repository {}".format(repo_name))
        try:
            Repo.clone_from(git_url, temp_repo_dir)
        except git.exc.GitCommandError:
            print("error cloning repository")
            continue

        # calculate code metrics on last snapshot
        print("calculating code metrics")
        repo_id = candidate_repo["id"]
        output_file = "{}/{}/{}-{}".format(data_dir, code_metrics_folder, repo_id, code_metrics_file)

        if not compute_metrics(output_file):
            continue

        temp_frame = prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name)

        if metrics is None:
            metrics = temp_frame
        else:
            metrics = pandas.concat([metrics, temp_frame], ignore_index=True)

        print("save data to csv")
        metrics.to_csv("{}/final-{}".format(data_dir, code_metrics_file))

        shutil.rmtree(temp_repo_dir)

def compute_metrics(output_file):
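    """Run the ck jar on temp_repo_dir and write its per-class metrics to output_file.

    Returns False (and removes the temporary clone) if the analysis fails or times out.
    """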
# e.g "Exception in thread "main" java.lang.NullPointerException..."
# java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar temp-repo/ data/36057260-code-metrics.csv
# subprocess.run("java -jar ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar {} {}"
# .format(temp_repo_dir, output_file), shell=True)
try:
subprocess.run(
" ".join(
["java", "-jar", "ck/ck-0.2.1-SNAPSHOT-jar-with-dependencies.jar", temp_repo_dir, output_file]
),
shell=True, check=True,
timeout=60 * 10
)
except subprocess.CalledProcessError:
print("exception analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
except subprocess.TimeoutExpired:
print("timeout analysing the repository - skipping")
shutil.rmtree(temp_repo_dir)
return False
return True
def prepare_metrics_data(candidate_repo, output_file, repo_id, repo_name):
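    """Aggregate the raw per-class ck metrics into mean/median/Q1/Q3 columns and
    attach the repository id, name, star count and contributor count."""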
    # analyse code quality vs stars and num contributors
    print("preparing data")
    metrics_raw = pandas.read_csv(output_file)
    metrics_raw.pop("file")
    metrics_raw.pop("class")
    metrics_raw.pop("type")

    # for each metric compute mean, median, Q1, and Q3
    mean = metrics_raw.mean().rename(lambda x: "average_{}".format(x))
    median = metrics_raw.median().rename(lambda x: "median_{}".format(x))
    q1 = metrics_raw.quantile(q=0.25).rename(lambda x: "Q1_{}".format(x))
    q3 = metrics_raw.quantile(q=0.75).rename(lambda x: "Q3_{}".format(x))

    temp_frame = pandas.DataFrame(pandas.concat([mean, median, q1, q3])).T
    temp_frame['id'] = repo_id
    temp_frame['name'] = repo_name
    temp_frame['stars'] = candidate_repo["stargazers_count"]
    temp_frame['contributors_total'] = candidate_repo["num_contributors"]
    return temp_frame

if __name__ == '__main__':
    main()
```
|
{
"source": "jeppeter/extargsparse",
"score": 3
}
|
#### File: test/tstextargsparse/opthandle.py
```python
import tempfile
import sys
import os

_extargs_parent_dir = os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..'))
if _extargs_parent_dir not in sys.path:
    _temp_path = sys.path
    sys.path = [_extargs_parent_dir]
    sys.path.extend(_temp_path)

import extargsparse

def pair_parse(args,validx,keycls,params):
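    """Option-parse handler for [++pair|+p]: consume the next two command-line
    parameters, append them to the option's list value and return 2, the number
    of parameters consumed."""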
    if (validx + 1) >= len(params):
        raise Exception('need 2 args for [++pair|+p]')
    val = getattr(args, keycls.optdest, None)
    if val is None:
        val = []
    val.append(params[validx])
    val.append(params[(validx + 1)])
    setattr(args, keycls.optdest, val)
    return 2

def pair_help(keycls):
    return '[first] [second]'

def single_2_jsonfunc(args,keycls,value):
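    """jsonfunc handler for [++even|+e]: expects an even-length list and stores
    every second element (indices 0, 2, 4, ...) on the parsed args."""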
    if not isinstance(value, list):
        raise Exception('not list value')
    if (len(value) % 2) != 0:
        raise Exception('not even sized')
    setvalue = []
    i = 0
    while i < len(value):
        setvalue.append(value[i])
        i += 2
    setattr(args, keycls.optdest, setvalue)
    return

def main():
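    """Build an ExtArgsParse with '++'/'+' prefixes and the custom handlers defined
    above, parse the command line and print the resulting option values."""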
    commandline = '''
    {
        "verbose|v" : "+",
        "pair|p!optparse=pair_parse;opthelp=pair_help!" : [],
        "even|e!jsonfunc=single_2_jsonfunc!" : [],
        "clr_CA_name" : null,
        "$" : "*"
    }
    '''
    options = extargsparse.ExtArgsOptions()
    options.longprefix = '++'
    options.shortprefix = '+'
    options.jsonlong = 'jsonfile'
    options.helplong = 'usage'
    options.helpshort = '?'
    options.flagnochange = True
    parser = extargsparse.ExtArgsParse(options)
    parser.load_command_line_string(commandline)
    args = parser.parse_command_line()
    print('verbose [%d]'%(args.verbose))
    print('pair (%s)'%(args.pair))
    print('args (%s)'%(args.args))
    print('even (%s)'%(args.even))
    return

if __name__ == '__main__':
    main()
```
#### File: test/tstextargsparse/testver.py
```python
import sys
import os
def _reload_extargs_path(curpath):
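    """If curpath holds an extargsparse package, move it to the front of sys.path
    so the local copy is imported instead of an installed one."""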
    _extargs_init_py = os.path.join(curpath, 'extargsparse', '__init__.py')
    if os.path.exists(_extargs_init_py):
        if curpath != sys.path[0]:
            if curpath in sys.path:
                sys.path.remove(curpath)
            oldpath = sys.path
            sys.path = [curpath]
            sys.path.extend(oldpath)
    return

_reload_extargs_path(os.path.abspath(os.path.dirname(__file__)))
_reload_extargs_path(os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)),'..','..')))
import extargsparse
import rtools
def main():
    print('extargsparse version %s'%(extargsparse.__version__))
    print('extargsparse version info %s'%(repr(extargsparse.__version_info__)))

if __name__ == '__main__':
    main()
```
|