{
"source": "jieyanzhu/codes-effective-computation-in-physics",
"score": 3
}
#### File: codes-effective-computation-in-physics/chap_17/a_list_mean.py
```python
import pdb

def mean(nums):
    top = sum(nums)
    bot = len(nums)
    return float(top) / float(bot)

if __name__ == "__main__":
    pdb.set_trace()
    a_list = [1, 2, 3, 4, 5, 6, 10, "one hundred"]
    mean(a_list)
```
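The script above plants `pdb.set_trace()` right before `mean()` is called on a list that mixes integers with a string, so the failure can be stepped into interactively. As a small companion sketch (not part of the original repo), the same `TypeError` can also be inspected post mortem instead of being stepped through:

```python
import pdb

def mean(nums):
    return float(sum(nums)) / float(len(nums))

try:
    mean([1, 2, 3, "one hundred"])  # sum() raises TypeError on the string element
except TypeError:
    pdb.post_mortem()  # opens the debugger at the frame where sum() failed
```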
#### File: codes-effective-computation-in-physics/chap_17/elementary.py
```python
from particle import Particle
import numpy

class ElementaryParticle(Particle):

    roar = "I am an Elementary Particle!"

    def __init__(self, spin):
        self.s = spin
        self.is_fermion = bool(spin % 1.0)
        self.is_boson = not self.is_fermion
```
#### File: codes-effective-computation-in-physics/chap_18/mod.py
```python
import numpy as np
import os

def fib(n):
    if n == 0 or n == 1:
        return 1
    else:
        return fib(n - 1) + fib(n - 2)

def sinc2d(x, y):
    if x == 0.0 and y == 0.0:
        return 1.0
    elif x == 0.0:
        return np.sin(y) / y
    elif y == 0.0:
        return np.sin(x) / x
    else:
        return (np.sin(x) / x) * (np.sin(y) / y)

def f():
    files = os.listdir('.')
    if 'no.txt' not in files:
        with open('yes.txt', 'w') as fhandle:
            fhandle.write('42')
    return None

def a(x):
    return x + 1

def b(x):
    return 2 * x

def c(x):
    return b(a(x))
```
#### File: codes-effective-computation-in-physics/chap_18/test_fib.py
```python
from nose.tools import assert_equal

from mod import fib

def test_fib0():
    # test edge 0
    obs = fib(0)
    assert_equal(1, obs)

def test_fib1():
    # test edge 1
    obs = fib(1)
    assert_equal(1, obs)

def test_fib6():
    # test regular point
    obs = fib(6)
    assert_equal(13, obs)
```
{
"source": "jieyaren/hello-world",
"score": 4
}
#### File: hello-world/py/5_LPS.py
```python
class Solution:
    # Manacher algorithm
    # http://en.wikipedia.org/wiki/Longest_palindromic_substring
    def longestPalindrome(self, s):
        # Transform S into T.
        # For example, S = "abba", T = "^#a#b#b#a#$".
        # ^ and $ signs are sentinels appended to each end to avoid bounds checking
        T = '#'.join('^{}$'.format(s))
        n = len(T)
        P = [0] * n
        C = R = 0
        # Overall goal: compute the palindrome radius P[i] for every i, to save time.
        # At the end we enumerate P[i] and take the maximum.
        # R acts as the right boundary, also to save time; it is not merely for
        # tracking the maximum radius.
        for i in range(1, n - 1):
            P[i] = (R > i) and min(R - i, P[2 * C - i])  # equals to i' = C - (i-C)
            # In Python, `and` evaluates left to right: if all values are truthy it
            # returns the last one, otherwise the first falsy value; `or` returns the
            # first truthy value. This gives P[i] an initial lower bound; P[i] = 0
            # would also be accepted, but the bound saves time.
            # Attempt to expand palindrome centered at i
            while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
                P[i] += 1  # while still a palindrome, grow the radius at i by 1
            # If palindrome centered at i expand past R,
            # adjust center based on expanded palindrome.
            if i + P[i] > R:  # update the right boundary R and the center C (needs more thought)
                C, R = i, i + P[i]
        # Find the maximum element in P.
        maxLen, centerIndex = max((n, i) for i, n in enumerate(P))
        return s[(centerIndex - maxLen) // 2: (centerIndex + maxLen) // 2]
```
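A quick sanity-check sketch for the Manacher implementation above (not part of the original file); for ties between equally long palindromes, which one is returned depends on which center wins the `max`:

```python
sol = Solution()
print(sol.longestPalindrome("babad"))  # "bab" or "aba", both length 3
print(sol.longestPalindrome("cbbd"))   # "bb"
```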
#### File: hello-world/py/6_ZZC.py
```python
class Solution(object):
    def convert(self, s, numRows):
        """
        :type s: str
        :type numRows: int
        :rtype: str
        """
        # Overall idea: split into numRows strings and append each character
        # to the end of the string for its row.
        L = [''] * numRows
        index = 0
        step = 1
        if numRows == 1 or numRows >= len(s):
            return s
        for c in s:
            L[index] += c
            if index == numRows - 1:
                step = -1  # on the downward (-1) pass, the direction flips again once index == 0
            elif index == 0:
                step = 1
            index += step
        return ''.join(L)
```
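A usage sketch with the classic LeetCode #6 examples (not part of the original file):

```python
sol = Solution()
print(sol.convert("PAYPALISHIRING", 3))  # "PAHNAPLSIIGYIR"
print(sol.convert("PAYPALISHIRING", 4))  # "PINALSIGYAHRPI"
```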
#### File: hello-world/py/chap1-12.py
```python
import re
def test_assert(l):
    assert(l[0])
    print l[1] + '(' + l[2] + ') pass'

'''
1.11 match email addresses
'''
def mail_check(s):
    matcher = '\w+<EMAIL>\.]+.<EMAIL>'
    m = re.match(matcher, s)
    return [m.group() != None, mail_check.__name__, s]
test_assert(mail_check("<EMAIL>"))
test_assert(mail_check("<EMAIL>"))
test_assert(mail_check("<EMAIL>"))
#test_assert(mail_check("a@ <EMAIL>"))
#test_assert(mail_check("a @<EMAIL>"))
#test_assert(mail_check("_<EMAIL>"))
#test_assert(mail_check(" @b.com"))
'''
1.10 match all complex-number strings
1.7-1.9
Matching all integers, long integers and floats is just the \d digit-count
variant of this, so it is skipped.
[0-9]{1-9}+
'''
def complex_check(s):
    matcher = '\d*(-\d+j)?'  # '\d*+(-|+\d*j)'
    m = re.match(matcher, s)
    return [m.group() != None, complex_check.__name__, s]
test_assert(complex_check("1-5j"))
test_assert(complex_check("1+5j"))
test_assert(complex_check("1 + 5j"))
test_assert(complex_check("1"))
test_assert(complex_check("5j"))
'''
1.6 match URLs; 1.12 is similar but with more domain suffixes, so it is not covered here.
'''
def net_check(s):
    matcher = '^w{3}\.\w+\.com$'
    m = re.match(matcher, s)
    return [m.group() != None, net_check.__name__, s]
test_assert(net_check("www.a.com"))
test_assert(net_check("www._a.com"))
test_assert(net_check("www.a_.com"))
#test_assert(net_check("www. a.com"))
'''
1.5 match digits, spaces and English words
'''
def street(s):
    matcher = '[0-9]+(\s[a-zA-Z]+)+'
    m = re.match(matcher, s)
    return [m.group() != None, street.__name__, s]
test_assert(street("12345 abc"))
test_assert(street('1 a b b c'))
#test_assert(street('a'))
#test_assert(street('1 '))
#test_assert(street('1a'))
'''
1.4 valid identifiers?
'''
def name(s):
    matcher = '_*|[a-zA-Z]*\w+'
    m = re.match(matcher, s)
    return [m.group() != None, name.__name__, s]
#test_assert(name('233'))
test_assert(name('w_w'))
test_assert(name('name'))
#test_assert(name(''))
#test_assert(name(' '))
_1 = 2
print _1
test_assert(name('_1'))
test_assert(name('_d'))
test_assert(name('_1_d'))
'''
1.3 match a\s or a\.
'''
def dot(s):
    matcher = '[a-zA-Z]+[\s|\.]'
    m = re.match(matcher, s)
    # print m.group() + '|'
    return [m.group() != None, dot.__name__, s]
test_assert(dot('a b'))
#test_assert(dot('1 b'))
test_assert(dot('a '))
#test_assert(dot(''))
#test_assert(dot('.'))
test_assert(dot('a.'))
#test_assert(dot(' aab.'))
#test_assert(dot('.b.'))
#test_assert(dot(' b'))
'''
1.2 match "a b"
'''
def blank(s):
    matcher = '\w+\s'
    m = re.match(matcher, s)
    return [m.group() != None, blank.__name__, s]
test_assert(blank('a b'))
test_assert(blank('aaaaa b'))
test_assert(blank('a bbbbbbb'))
test_assert(blank('aaaaaa bbbbbbb'))
#test_assert(blank('aaa'))
'''
1.1 bat bit but hat hit hut
([bh][aiu]t)
'''
def aiu(s):
    matcher = '[bh][aiu]t'
    m = re.match(matcher, s)
    return [m.group() != None, aiu.__name__, s]
test_assert(aiu('bat'))
test_assert(aiu('but'))
test_assert(aiu('bit'))
test_assert(aiu('hat'))
test_assert(aiu('hut'))
test_assert(aiu('hit'))
#test_assert(aiu('abc'))
```
#### File: hello-world/py/getip.py
```python
import socket
import struct
import fcntl

def getip(ethname):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ethname[:15])
    )[20:24])

if __name__ == '__main__':
    print getip('eth0')
```
#### File: hello-world/py/lintcode_478.py
```python
class Calculator:
    """
    @param a: An integer
    @param operator: A character, +, -, *, /.
    @param b: An integer
    @return: The result
    """
    def calculate(self, a, operator, b):
        # write your code here
        switcher = {
            "+": lambda a, b: a + b,
            "-": lambda a, b: a - b,
            "*": lambda a, b: a * b,
            "/": lambda a, b: a / b
        }
        return switcher.get(operator)(a, b)
```
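A usage sketch for the dispatch-table calculator above (not part of the original file); note that `/` keeps Python's own division semantics, so integer operands truncate under Python 2 and yield a float under Python 3:

```python
calc = Calculator()
print(calc.calculate(7, '+', 3))  # 10
print(calc.calculate(7, '/', 2))  # 3 on Python 2, 3.5 on Python 3
```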
#### File: hello-world/py/Sum of Multiples of Whole Numbers.py
```python
a = []
b = []

def multiples_of_3(n):
    i = 1
    for i in range(n):  # stops at 1 less than the value passed to `range`
        m = i * 3
        if (m < n):
            a.append(m)

def multiples_of_5(n):
    j = 1
    for j in range(n):  # could change to 201 and still work
        k = j * 5
        if (k < n):
            b.append(k)

if __name__ == "__main__":
    n = input()
    multiples_of_3(n)
    multiples_of_5(n)
    print sum(set(a + b))
```
{
"source": "jieyu97/mvpp",
"score": 2
} |
#### File: jieyu97/mvpp/igep_model_test_temperature.py
```python
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.losses import Loss
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
from scoringRules import es_sample
from sklearn import preprocessing
plt.close("all")
# from hyperopt.pyll.base import scope
# from hyperopt import Trials, tpe, fmin, hp, STATUS_OK
# import pickle
import tensorflow.compat.v1 as tfv
tfv.disable_v2_behavior()
# fix random seed for reproducibility
seed = 8
np.random.seed(seed)
# set model hyper parameters
N_SAMPLES_TRAIN = 50 # number of samples drawn during training
EPOCHS = 50 # max epochs
# misc options
VERBOSE = 2 # determines how much info is given about fitting
PLOT_LEARNING_CURVE = False # if True, the model's learning curve is plotted
N_SAMPLES_TEST = 50 # 1000 # number of samples used for computing scores
DIM = 10 # dimension of target values
TAUS = np.linspace(1,99,99)/100 # which quantiles to evaluate
PLOT_RESULTS = False
# Model 3.2.3 hyperparameters
latent_dist = 'uniform'
learningrate = 0.01
epochs = EPOCHS
# important hp
batch_size = 64
layer_number = 2
nodes_number = 25
dim_latent = 20
path = '/home/chen_jieyu/IGEP/ens_fc_t2m_complete.feather'
t2m_ens_complete = pd.read_feather(path)
path_add = '/home/chen_jieyu/IGEP/tem_additional_predictors.feather'
t2m_add_complete = pd.read_feather(path_add)
dist_samples = pd.read_csv('/home/chen_jieyu/IGEP/dist_10samples.csv', header=None)
# define energy score
def energy_score(y_true, S):
"""
Computes energy score:
Parameters
----------
y_true : tf tensor of shape (BATCH_SIZE, D, 1)
True values.
S : tf tensor of shape (BATCH_SIZE, D, N_SAMPLES)
Predictive samples.
Returns
-------
tf tensor of shape (BATCH_SIZE,)
Scores.
"""
# y_true = tf.cast(y_true, tf.float32)
# S = tf.cast(S, tf.float32)
beta=1
n_samples = S.shape[-1]
def expected_dist(diff, beta):
return K.sum(K.pow(K.sqrt(K.sum(K.square(diff), axis=-2)+K.epsilon()), beta),axis=-1)
es_1 = expected_dist(y_true - S, beta)
es_2 = 0
for i in range(n_samples):
es_2 = es_2 + expected_dist(K.expand_dims(S[:,:,i]) - S, beta)
# es_1 = tf.cast(es_1, tf.float32)
# es_2 = tf.cast(es_2, tf.float32)
n_samples = tf.cast(n_samples, tf.float32)
es = es_1/n_samples - es_2/(2*n_samples**2)
es = tf.cast(es, tf.float32)
return es
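# Note added for clarity (not in the original file): with predictive samples
# X_1, ..., X_m and observation y, the estimator implemented above is
#     ES = (1/m) * sum_i ||X_i - y||^beta - (1/(2*m^2)) * sum_{i,j} ||X_i - X_j||^beta
# evaluated per batch element with beta = 1, i.e. the usual sample-based energy score.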
# subclass tensorflow.keras.losses.Loss
class EnergyScore(Loss):
def call(self, y_true, S):
return energy_score(y_true, S)
for k in [10,20,30,40,50,60,70,80,90]:
tfv.reset_default_graph()
station_sample = dist_samples.iloc[k,]
ens_sample = t2m_ens_complete[t2m_ens_complete['station'].isin(station_sample)]
dateobs_count = ens_sample.groupby('date')['date'].count()
dates = dateobs_count.index
used_dates = dates[dateobs_count == DIM]
used_ens_sample = ens_sample[ens_sample['date'].isin(used_dates)]
add_sample = t2m_add_complete[t2m_add_complete['station'].isin(station_sample)]
used_add_sample = add_sample[add_sample['date'].isin(used_dates)]
# t2m data
t2m_obs = used_ens_sample['obs']
t2m_obs.index = used_ens_sample['date']
data_obs = t2m_obs
# set initial training and test dates
train_dateindex = ((t2m_obs.index.year != 2016) & (t2m_obs.index.year != 2015))
val_dateindex = (t2m_obs.index.year == 2015)
test_dateindex = (t2m_obs.index.year == 2016)
# Predictions
t2m_ens = used_ens_sample.iloc[:, 3:53]
t2m_ens.index = used_ens_sample['date']
data_ens = t2m_ens
# added predictors
add_dim = 37
t2m_add = used_add_sample.loc[:, ["d2m_mean", "d2m_var",
"q_pl850_mean", "q_pl850_var",
"tcc_mean", "tcc_var",
"u_pl850_mean", "u_pl850_var",
"v_pl850_mean", "v_pl850_var",
"sshf_mean", "sshf_var",
"slhf_mean", "slhf_var",
"u10_mean", "u10_var",
"v10_mean", "v10_var",
"cape_mean", "cape_var",
"sp_mean", "sp_var",
"u_pl500_mean", "u_pl500_var",
"v_pl500_mean", "v_pl500_var",
"gh_pl500_mean", "gh_pl500_var",
"ssr_mean", "ssr_var",
"str_mean", "str_var",
"lat", "lon",
"alt", "orog",
"sin_yday"]]
t2m_add.index = used_add_sample['date']
data_add = t2m_add
# get training and test data
obser = data_obs.copy()
pred = data_ens.copy()
addpre = data_add.copy()
dim = DIM
pred_mu = pred.mean(axis = 1)
pred_sigma = pred.var(axis = 1)
######### standardization old
scaler = preprocessing.StandardScaler().fit(obser[train_dateindex].values.reshape(-1,1)) #= standardise data ???
stand_obs = scaler.transform(obser.values.reshape(-1,1)).reshape(-1)
obser.iloc[:] = stand_obs
for i in range(pred.shape[1]):
pred.iloc[:,i] = scaler.transform(pred.iloc[:,i].values.reshape(-1,1))
ens_mu = pred.mean(axis=1)
ens_sigma = pred.var(axis=1)
ens_max = pred.max(axis=1)
ens_min = pred.min(axis=1)
ens_spread = ens_max - ens_min
# scaler_mu = preprocessing.StandardScaler().fit(pred_mu[train_dateindex].values.reshape(-1,1))
# ens_mu = scaler_mu.transform(pred_mu.values.reshape(-1,1))
# scaler_sigma = preprocessing.StandardScaler().fit(pred_sigma[train_dateindex].values.reshape(-1,1))
# ens_sigma = scaler_sigma.transform(pred_sigma.values.reshape(-1,1))
for i in range(addpre.shape[1]-1):
scaler_i = preprocessing.StandardScaler().fit(addpre.iloc[train_dateindex,i].values.reshape(-1,1))
addpre.iloc[:,i] = scaler_i.transform(addpre.iloc[:,i].values.reshape(-1,1))
# ######### standardization new
# obser_copy = data_obs.copy()
# pred_copy = data_ens.copy()
# addpre_copy = data_add.copy()
#
# scale = np.max(np.absolute(obser_copy[train_dateindex].values))
# obser.iloc[:] = obser_copy.values / scale
# for i in range(pred.shape[1]):
# pred.iloc[:,i] = pred_copy.iloc[:,i].values / scale
#
# for i in range(addpre.shape[1]-1):
# scale_i = np.max( np.absolute( addpre_copy.iloc[train_dateindex,i].values ) )
# addpre.iloc[:,i] = addpre_copy.iloc[:,i].values / scale_i
#
# ens_mu = pred.mean(axis=1)
# ens_sigma = pred.std(axis=1)
add_pre_mu = addpre.loc[:,["d2m_mean","q_pl850_mean","tcc_mean","u_pl850_mean","v_pl850_mean",
"sshf_mean","slhf_mean","u10_mean","v10_mean","cape_mean","sp_mean",
"u_pl500_mean","v_pl500_mean","gh_pl500_mean","ssr_mean","str_mean"]]
add_pre_sigma = addpre.loc[:,["d2m_var","q_pl850_var","tcc_var","u_pl850_var","v_pl850_var",
"sshf_var","slhf_var","u10_var","v10_var","cape_var","sp_var",
"u_pl500_var","v_pl500_var","gh_pl500_var","ssr_var","str_var"]]
n_add = 16
# Inputs for Model 3.2.3
x_train_m323 = [np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[train_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[train_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[train_dateindex].values.reshape((-1, dim, 1)),
ens_max[train_dateindex].values.reshape((-1, dim, 1)),
ens_min[train_dateindex].values.reshape((-1, dim, 1)),
ens_spread[train_dateindex].values.reshape((-1, dim, 1)),
addpre[train_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_val_m323 = [np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[val_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[val_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[val_dateindex].values.reshape((-1, dim, 1)),
ens_max[val_dateindex].values.reshape((-1, dim, 1)),
ens_min[val_dateindex].values.reshape((-1, dim, 1)),
ens_spread[val_dateindex].values.reshape((-1, dim, 1)),
addpre[val_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
x_test_m323 = [np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_mu[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
add_pre_sigma[test_dateindex].values.reshape((-1, dim, n_add))
), axis=-1),
np.concatenate((ens_mu[test_dateindex].values.reshape((-1, dim, 1)),
ens_sigma[test_dateindex].values.reshape((-1, dim, 1)),
ens_max[test_dateindex].values.reshape((-1, dim, 1)),
ens_min[test_dateindex].values.reshape((-1, dim, 1)),
ens_spread[test_dateindex].values.reshape((-1, dim, 1)),
addpre[test_dateindex].values.reshape((-1, dim, add_dim))
), axis=-1)]
y_train = obser[train_dateindex].values.reshape((-1, dim, 1))
y_val = obser[val_dateindex].values.reshape((-1, dim, 1))
y_test = obser[test_dateindex].values.reshape((-1, dim, 1))
y_train_tmp = y_train
y_val_tmp = y_val
y_test_tmp = y_test
testy = data_obs[test_dateindex]
# model wrapping
# Function to create model, fit and train model, test model, and get the ES output.
# def igep_model3_es(params):
dim_out = DIM
# dim_in_mean = 1
dim_in_features_mu = x_train_m323[0].shape[-1]
dim_in_features_sigma = x_train_m323[1].shape[-1]
dim_in_features_all = x_train_m323[2].shape[-1] # dim_in_features_all != dim_in_features*2 here
n_samples = N_SAMPLES_TRAIN
# optimizer = "Adam"
train_x = x_train_m323
train_y = y_train_tmp
val_x = x_val_m323
val_y = y_val_tmp
test_x = x_test_m323
test_y = testy
if latent_dist == "uniform":
latent_dist_params = (-1.0, 1.0)
elif latent_dist == "normal":
latent_dist_params = (0.0, 1.0)
### Inputs ###
input_mean = keras.Input(shape=(dim_out, dim_in_features_mu), name = "input_mean")
input_sd = keras.Input(shape=(dim_out, dim_in_features_sigma), name = "input_sd")
input_all = keras.Input(shape=(dim_out, dim_in_features_all), name = "input_all")
bs = K.shape(input_mean)[0]
x_mean = layers.LocallyConnected1D(filters=1,
kernel_size=1,
strides=1,
padding='valid',
data_format='channels_last',
use_bias=True,
activation='linear')(input_mean) # (, dim_out, 1)
# x_mean = layers.Flatten()(input_mean)
#
# x_mean = layers.Dense(128, use_bias=True, activation = 'relu')(x_mean)
# x_mean = layers.Dense(64, use_bias=True, activation = 'relu')(x_mean)
# x_mean = layers.Dense(32, use_bias=True, activation = 'relu')(x_mean)
#
# x_mean = layers.Dense(dim_out, use_bias=True, activation = 'linear')(x_mean) # (, dim_out*1)
# x_mean = layers.Reshape((dim_out, 1))(x_mean) # (, dim_out, 1)
# x_mean = layers.LocallyConnected1D(filters=32,
# kernel_size=1,
# strides=1,
# padding='valid',
# data_format='channels_last',
# use_bias=True,
# activation='linear')(input_mean) # (, dim_out, 8)
# x_mean = layers.LocallyConnected1D(filters=1,
# kernel_size=1,
# strides=1,
# padding='valid',
# data_format='channels_last',
# use_bias=True,
# activation='linear')(x_mean) # (, dim_out, 1)
##################################################################
x_mean_all = layers.Lambda(lambda arg: K.repeat_elements(arg, n_samples, axis=-1))(x_mean) # (, dim_out, n_samples)
# z_delta = layers.LocallyConnected1D(filters=1,
# kernel_size=1,
# strides=1,
# padding='valid',
# data_format='channels_last',
# use_bias=True,
# activation='linear')(input_sd) # (, dim_out, 1)
# z_delta = layers.Flatten()(input_sd)
#
# z_delta = layers.Dense(25, use_bias=True, activation = 'elu')(z_delta)
# z_delta = layers.Dense(25, use_bias=True, activation = 'elu')(z_delta)
# z_delta = layers.Dense(25, use_bias=True, activation = 'elu')(z_delta)
# z_delta = layers.Dense(dim_out, use_bias=True, activation = 'linear')(z_delta) # (, dim_out*1)
# z_delta = layers.Reshape((dim_out, 1))(z_delta) # (, dim_out, 1)
z_delta = layers.LocallyConnected1D(filters=16,
kernel_size=1,
strides=1,
padding='valid',
data_format='channels_last',
use_bias=True,
activation='linear')(input_sd) # (, dim_out, 1)
z_delta = layers.LocallyConnected1D(filters=1,
kernel_size=1,
strides=1,
padding='valid',
data_format='channels_last',
use_bias=True,
activation='linear')(z_delta) # (, dim_out, 1)
##################################################################
z_delta_flat = layers.Flatten()(z_delta)
z_delta_final = layers.Dense(dim_latent, activation = 'exponential')(z_delta_flat) # spread of latent variables
z_delta_reshape = layers.Lambda(lambda arg: K.reshape(arg, (bs, dim_latent, 1)))(z_delta_final) # (, dim_latent, 1)
# z_delta = layers.Dense(dim_latent, use_bias=True, activation = 'linear')(z_delta) # (, dim_latent*1)
# z_delta_reshape = layers.Reshape((dim_latent, 1))(z_delta) # (, dim_latent, 1)
if latent_dist == "uniform":
z = layers.Lambda(lambda args: K.random_uniform(shape=(args[0], args[1], args[2]),
minval=latent_dist_params[0],
maxval=latent_dist_params[1]))([bs, dim_latent, n_samples])
elif latent_dist == "normal":
z = layers.Lambda(lambda args: K.random_normal(shape=(args[0], args[1], args[2]),
mean=latent_dist_params[0],
stddev=latent_dist_params[1]))([bs, dim_latent, n_samples])
z_adjust_spread = layers.Multiply()([z_delta_reshape, z]) # (, dim_latent, n_samples)
# weights
# W = layers.Flatten()(input_all)
# for l in range(layer_number):
# W = layers.Dense(nodes_number, use_bias=True, activation = 'elu')(W)
#
# W = layers.Dense(dim_out*dim_latent, use_bias=True, activation = 'linear')(W) # (, dim_out*dim_latent)
# W = layers.Reshape((dim_out, dim_latent))(W) # (, dim_out, dim_latent)
# ##################################################################
#
# z_samples = layers.Dot(axes=(2,1))([W,z_adjust_spread]) # (, dim_out, n_samples)
# y = layers.Add()([x_mean_all,z_samples])
W = layers.Flatten()(input_all)
z_n = layers.Flatten()(z_adjust_spread)
W_con = layers.Concatenate(axis=1)([W, z_n])
W_con = layers.Dense(25, use_bias=True, activation = 'elu')(W_con)
W_con = layers.Dense(25, use_bias=True, activation = 'elu')(W_con)
# W_con = layers.Dense(100, use_bias=True, activation = 'elu')(W_con)
W_con = layers.Dense(dim_out*n_samples, use_bias=True, activation = 'linear')(W_con) # (, dim_out*n_samples)
z_samples = layers.Reshape((dim_out, n_samples))(W_con) # (, dim_out, n_samples)
y = layers.Add()([x_mean_all, z_samples])
model = Model(inputs = [input_mean, input_sd, input_all], outputs = y)
opt = keras.optimizers.Adam(learning_rate = learningrate) # lr default 0.01
model.compile(loss = EnergyScore(), optimizer = opt)
callback = tf.keras.callbacks.EarlyStopping(monitor = 'val_loss', min_delta = 0.001, patience = 3,
restore_best_weights = True)
model.fit(x = train_x, y = train_y,
batch_size = batch_size,
epochs = epochs,
verbose = 0,
callbacks = [callback],
validation_split = 0.0,
validation_data = (val_x, val_y),
sample_weight=None)
N_SAMPLES_TEST = 50
# predict and append to list
S_m3 = []
std_fcst = model.predict(x_test_m323, N_SAMPLES_TEST)
S_m3.append(std_fcst)
pre_dat = np.concatenate(S_m3, axis = 0)
fcst = scaler.inverse_transform(np.reshape(pre_dat, (pre_dat.shape[0]*pre_dat.shape[1],-1)))
# S_m3.append( ( model.predict(x_test_m323, N_SAMPLES_TEST) ) * scale )
ES = es_sample(y = np.reshape(test_y.values, (-1, DIM)), dat = np.reshape(fcst,
(pre_dat.shape[0],pre_dat.shape[1],pre_dat.shape[2]) ) )
pd_output = pd.DataFrame(fcst, index=testy.index)
# print(pd_output)
print(k)
print(ES)
```
{
"source": "Jie-Yuan/1_DataMining",
"score": 3
}
#### File: examples/accelerate/numba.py
```python
from numba import jit, njit, vectorize
import numpy as np

x = np.arange(100).reshape(10, 10)

# Numba likes NumPy broadcasting
# @jit
@njit  # "nopython" mode gives better performance
def go_fast1(a):  # compiled on the first call
    trace = 0
    for i in range(a.shape[0]):  # Numba likes loops
        trace += np.tanh(a[i, i])  # Numba likes NumPy functions
    return a + trace
```
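A small usage sketch (not in the original file): the first call to `go_fast1` pays the JIT compilation cost, later calls run the cached machine code.

```python
import time

go_fast1(x)                   # first call triggers compilation
start = time.perf_counter()
result = go_fast1(x)          # second call runs the compiled version
print(result.shape, time.perf_counter() - start)
```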
#### File: examples/client/rpc.py
```python
import os
from xmlrpc.server import SimpleXMLRPCServer
from socketserver import ThreadingMixIn, ForkingMixIn

class ThreadXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
    """Multithreaded XML-RPC server"""
    pass

# shell
def sh(cmd):
    with os.popen(cmd) as f:
        return f.read().split()

if __name__ == '__main__':
    server = ThreadXMLRPCServer(('0.0.0.0', 7777), allow_none=True)
    # register the function
    server.register_function(sh, 'sh')
    print("server running")
    # keep serving, waiting for calls
    server.serve_forever()

# from xmlrpc.client import ServerProxy
#
# client = ServerProxy('http://10.114.38.22:7777')
# client.sh('ls')
```
#### File: examples/opt/lgb.py
```python
from tql.algo_ml.cv import LGBMClassifierCV
from tql.algo_ml.opt_params import Optimizer
from sklearn.datasets import make_classification

X, y = make_classification(10000)

class Opt(Optimizer):
    def objective(self, **params):
        """Override the objective function"""
        self.params = params
        # hyperparameters
        params['num_leaves'] = int(params['num_leaves'])
        params['min_child_samples'] = int(params['min_child_samples'])
        # fixed parameters: TODO find a more convenient way
        params['n_estimators'] = 3000
        params['subsample_freq'] = 6  # does this need tuning?
        params['verbosity'] = -1
        params['n_jobs'] = 16
        # self.params = params
        self.clf = LGBMClassifierCV(params)
        return self.clf.fit(X, y, X[:1])

opt = Optimizer(X, y)
print(opt.maximize(1))
```
#### File: examples/sanic/restful_sanic.py
```python
import jieba
from restful_api import Api
import requests
from lxml.etree import HTML
from googletrans import Translator
translator = Translator(service_urls=['translate.google.cn', 'translate.google.com'],
timeout=3)
def trans_google(q='苹果', fromLang='auto', toLang='en'):
"""
:param q:
:param fromLang:
:param toLang: zh
:return:
"""
url = "http://translate.google.cn/translate_a/single?client=gtx&dt=t&dj=1&ie=UTF-8&sl=%s&tl=%s" % (fromLang, toLang)
try:
r = requests.get(url, {'q': q}, timeout=3)
text = r.json()['sentences'][0]['trans']
except Exception as e:
print(e)
text = translator.translate(q, toLang, fromLang).text
return text
def get_title(url='https://baijiahao.baidu.com/s?id=1604534610481479105&wfr=spider&for=pc&isFailFlag=1'):
r = requests.get(url, headers={'user-agent': 'Mozilla/5.0'})
r.encoding = r.apparent_encoding
# soup = BeautifulSoup(r.text)
dom_tree = HTML(r.text)
title = dom_tree.xpath('//title/text()')
return title[0]
api = Api('/ctr/trans', trans_google, method='GET', verbose=False)
api = Api('/get_title', get_title, api.app)
api.app.run('0.0.0.0')
# import requests
# json = {'x': 1, 'y': 10}
# requests.post('http://127.0.0.1:8000/post1', json=json).json()
# requests.post('http://127.0.0.1:8000/post2', json=json).json()
# requests.post('http://1172.16.31.10:8000/post3', json=json).json()
```
#### File: 0_DA/udfs/__init__.py
```python
import warnings

# warnings.filterwarnings("ignore")
def ignore_warn(*args, **kwargs):
    pass

warnings.warn = ignore_warn

import os
import re
import time
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
from sklearn.preprocessing import *
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score

# directory tree
def lst_tree(p='..', n=0):
    p = Path(p)
    if p.is_file():  # is it a file?
        print('|' + '\t|' * n + '-' * 4 + p.name)
    elif p.is_dir():  # is it a directory?
        print('|' + '\t|' * n + '-' * 4 + str(p.relative_to(p.parent)) + '\\')
        for pt in p.iterdir():
            lst_tree(pt, n + 1)  # recurse
```
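A one-line usage sketch (not in the original file) for the directory-tree helper defined above:

```python
lst_tree('.')  # prints an indented tree of files and sub-directories under the current directory
```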
#### File: 0_DA/udfs/model_selection.py
```python
__title__ = 'prediction'
__author__ = 'JieYuan'
__mtime__ = '2018/2/13'
from .__init__ import *
def cv(clf, X, y, cv=3, stratified=True, seed=42):
    from sklearn.model_selection import KFold, StratifiedKFold
    from sklearn.metrics import roc_auc_score, f1_score, classification_report
    if stratified:
        kf = StratifiedKFold(cv, True, seed).split(X, y)
    else:
        kf = KFold(cv, True, seed).split(X, y)
    f1_loss = []
    auc_loss = []
    for i, (train_index, test_index) in enumerate(kf, 1):
        X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]
        clf.fit(X_train, y_train)
        y_pred = clf.predict_proba(X_test)[:, 1]
        auc_loss.append(roc_auc_score(y_test, y_pred))
        # threshold = sorted(y_pred)[::-1][y_test.sum()]
        # y_pred = np.where(y_pred > threshold, 1, 0)
        y_pred = get_class(y_pred, y_test.sum())
        print("%d fold:\n" % i, classification_report(y_test, y_pred))
        f1_loss.append(f1_score(y_test, y_pred))
    print(" F1-CV-Score: %0.5f (+/- %0.3f)" % (np.mean(f1_loss), np.std(f1_loss)))
    print("Auc-CV-Score: %0.5f (+/- %0.3f)" % (np.mean(auc_loss), np.std(auc_loss)))
    return f1_loss, auc_loss

def get_class(y_pred, n_pos):
    """
    :param y_pred: clf.predict_proba(X_test)[:, 1]
    :param n_pos: number of positive samples
    :return: useful when computing the F1 score
    """
    threshold = sorted(y_pred)[::-1][n_pos]
    y_pred = np.where(y_pred > threshold, 1, 0)
    return y_pred
```
#### File: 3_FeatureEngineering/smbinning/mdlp.py
```python
import pandas as pd
import numpy as np
from entropy import entropy, cut_point_information_gain
from math import log
import sys
import getopt
import re
class MDLP_Discretizer(object):
def __init__(self, dataset, class_label, out_path_data, out_path_bins, features=None):
'''
initializes discretizer object:
saves raw copy of data and creates self._data with only features to discretize and class
computes initial entropy (before any splitting)
self._features = features to be discretized
self._classes = unique classes in raw_data
self._class_name = label of class in pandas_utils dataframe
self._data = partition of data with only features of interest and class
self._cuts = dictionary with cut points for each feature
:param dataset: pandas_utils dataframe with data to discretize
:param class_label: name of the column containing class in input dataframe
:param features: if !None, features that the user wants to discretize specifically
:return:
'''
if not isinstance(dataset, pd.core.frame.DataFrame): # class needs a pandas_utils dataframe
raise AttributeError('input dataset should be a pandas_utils data frame')
self._data_raw = dataset #copy or original input data
self._class_name = class_label
self._classes = self._data_raw[self._class_name].unique()
#if user specifies which attributes to discretize
if features:
self._features = [f for f in features if f in self._data_raw.columns] # check if features in dataframe
missing = set(features) - set(self._features) # specified columns not in dataframe
if missing:
print('WARNING: user-specified features %s not in input dataframe' % str(missing))
else: # then we need to recognize which features are numeric
numeric_cols = self._data_raw._data.get_numeric_data().items
self._features = [f for f in numeric_cols if f != class_label]
#other features that won't be discretized
self._ignored_features = set(self._data_raw.columns) - set(self._features)
#create copy of data only including features to discretize and class
self._data = self._data_raw.loc[:, self._features + [class_label]]
#pre-compute all boundary points in dataset
self._boundaries = self.compute_boundary_points_all_features()
#initialize feature bins with empty arrays
self._cuts = {f: [] for f in self._features}
#get cuts for all features
self.all_features_accepted_cutpoints()
#discretize self._data
self.apply_cutpoints(out_data_path=out_path_data, out_bins_path=out_path_bins)
def MDLPC_criterion(self, data, feature, cut_point):
'''
Determines whether a partition is accepted according to the MDLPC criterion
:param feature: feature of interest
:param cut_point: proposed cut_point
:param partition_index: index of the sample (dataframe partition) in the interval of interest
:return: True/False, whether to accept the partition
'''
#get dataframe only with desired attribute and class columns, and split by cut_point
data_partition = data.copy(deep=True)
data_left = data_partition[data_partition[feature] <= cut_point]
data_right = data_partition[data_partition[feature] > cut_point]
#compute information gain obtained when splitting data at cut_point
cut_point_gain = cut_point_information_gain(dataset=data_partition, cut_point=cut_point,
feature_label=feature, class_label=self._class_name)
#compute delta term in MDLPC criterion
N = len(data_partition) # number of examples in current partition
partition_entropy = entropy(data_partition[self._class_name])
k = len(data_partition[self._class_name].unique())
k_left = len(data_left[self._class_name].unique())
k_right = len(data_right[self._class_name].unique())
entropy_left = entropy(data_left[self._class_name]) # entropy of partition
entropy_right = entropy(data_right[self._class_name])
delta = log(3 ** k, 2) - (k * partition_entropy) + (k_left * entropy_left) + (k_right * entropy_right)
#to split or not to split
gain_threshold = (log(N - 1, 2) + delta) / N
if cut_point_gain > gain_threshold:
return True
else:
return False
def feature_boundary_points(self, data, feature):
'''
Given an attribute, find all potential cut_points (boundary points)
:param feature: feature of interest
:param partition_index: indices of rows for which feature value falls whithin interval of interest
:return: array with potential cut_points
'''
#get dataframe with only rows of interest, and feature and class columns
data_partition = data.copy(deep=True)
data_partition.sort_values(feature, ascending=True, inplace=True)
boundary_points = []
#add temporary columns
data_partition['class_offset'] = data_partition[self._class_name].shift(1) # column where first value is now second, and so forth
data_partition['feature_offset'] = data_partition[feature].shift(1) # column where first value is now second, and so forth
data_partition['feature_change'] = (data_partition[feature] != data_partition['feature_offset'])
data_partition['mid_points'] = data_partition.loc[:, [feature, 'feature_offset']].mean(axis=1)
potential_cuts = data_partition[data_partition['feature_change'] == True].index[1:]
sorted_index = data_partition.index.tolist()
for row in potential_cuts:
old_value = data_partition.loc[sorted_index[sorted_index.index(row) - 1]][feature]
new_value = data_partition.loc[row][feature]
old_classes = data_partition[data_partition[feature] == old_value][self._class_name].unique()
new_classes = data_partition[data_partition[feature] == new_value][self._class_name].unique()
if len(set.union(set(old_classes), set(new_classes))) > 1:
boundary_points += [data_partition.loc[row]['mid_points']]
return set(boundary_points)
def compute_boundary_points_all_features(self):
'''
Computes all possible boundary points for each attribute in self._features (features to discretize)
:return:
'''
boundaries = {}
for attr in self._features:
data_partition = self._data.loc[:, [attr, self._class_name]]
boundaries[attr] = self.feature_boundary_points(data=data_partition, feature=attr)
return boundaries
def boundaries_in_partition(self, data, feature):
'''
From the collection of all cut points for all features, find cut points that fall within a feature-partition's
attribute-values' range
:param data: data partition (pandas_utils dataframe)
:param feature: attribute of interest
:return: points within feature's range
'''
range_min, range_max = (data[feature].min(), data[feature].max())
return set([x for x in self._boundaries[feature] if (x > range_min) and (x < range_max)])
def best_cut_point(self, data, feature):
'''
Selects the best cut point for a feature in a data partition based on information gain
:param data: data partition (pandas_utils dataframe)
:param feature: target attribute
:return: value of cut point with highest information gain (if many, picks first). None if no candidates
'''
candidates = self.boundaries_in_partition(data=data, feature=feature)
# candidates = self.feature_boundary_points(data=data, feature=feature)
if not candidates:
return None
gains = [(cut, cut_point_information_gain(dataset=data, cut_point=cut, feature_label=feature,
class_label=self._class_name)) for cut in candidates]
gains = sorted(gains, key=lambda x: x[1], reverse=True)
return gains[0][0] #return cut point
def single_feature_accepted_cutpoints(self, feature, partition_index=pd.DataFrame().index):
'''
Computes the cuts for binning a feature according to the MDLP criterion
:param feature: attribute of interest
:param partition_index: index of examples in data partition for which cuts are required
:return: list of cuts for binning feature in partition covered by partition_index
'''
if partition_index.size == 0:
partition_index = self._data.index # if not specified, full sample to be considered for partition
data_partition = self._data.loc[partition_index, [feature, self._class_name]]
#exclude missing data:
if data_partition[feature].isnull().values.any():  # any() must be called, otherwise the check is always truthy
data_partition = data_partition[~data_partition[feature].isnull()]
#stop if constant or null feature values
if len(data_partition[feature].unique()) < 2:
return
#determine whether to cut and where
cut_candidate = self.best_cut_point(data=data_partition, feature=feature)
if cut_candidate == None:
return
decision = self.MDLPC_criterion(data=data_partition, feature=feature, cut_point=cut_candidate)
#apply decision
if not decision:
return # if partition wasn't accepted, there's nothing else to do
if decision:
# try:
#now we have two new partitions that need to be examined
left_partition = data_partition[data_partition[feature] <= cut_candidate]
right_partition = data_partition[data_partition[feature] > cut_candidate]
if left_partition.empty or right_partition.empty:
return #extreme point selected, don't partition
self._cuts[feature] += [cut_candidate] # accept partition
self.single_feature_accepted_cutpoints(feature=feature, partition_index=left_partition.index)
self.single_feature_accepted_cutpoints(feature=feature, partition_index=right_partition.index)
#order cutpoints in ascending order
self._cuts[feature] = sorted(self._cuts[feature])
return
def all_features_accepted_cutpoints(self):
'''
Computes cut points for all numeric features (the ones in self._features)
:return:
'''
for attr in self._features:
self.single_feature_accepted_cutpoints(feature=attr)
return
def apply_cutpoints(self, out_data_path=None, out_bins_path=None):
'''
Discretizes data by applying bins according to self._cuts. Saves a new, discretized file, and a description of
the bins
:param out_data_path: path to save discretized data
:param out_bins_path: path to save bins description
:return:
'''
bin_label_collection = {}
for attr in self._features:
if len(self._cuts[attr]) == 0:
self._data[attr] = 'All'
bin_label_collection[attr] = ['All']
else:
cuts = [-np.inf] + self._cuts[attr] + [np.inf]
start_bin_indices = range(0, len(cuts) - 1)
bin_labels = ['%s_to_%s' % (str(cuts[i]), str(cuts[i+1])) for i in start_bin_indices]
bin_label_collection[attr] = bin_labels
self._data[attr] = pd.cut(x=self._data[attr].values, bins=cuts, right=False, labels=bin_labels,
precision=6, include_lowest=True)
#reconstitute full data, now discretized
if self._ignored_features:
to_return = pd.concat([self._data, self._data_raw[list(self._ignored_features)]], axis=1)
to_return = to_return[self._data_raw.columns] #sort columns so they have the original order
else:
to_return = self._data
#save data as csv
if out_data_path:
print(out_data_path)
to_return.to_csv(out_data_path)
#save bins description
if out_bins_path:
with open(out_bins_path, 'w') as bins_file:
print('Description of bins in file: %s' % out_data_path, file=bins_file)
for attr in self._features:
print('attr: %s\n\t%s' % (attr, ', '.join([bin_label for bin_label in bin_label_collection[attr]])), file=bins_file)
```
#### File: 2_FactorizationMachine/FFM/OneEncoding2libffm.py
```python
from sklearn.preprocessing import OneHotEncoder  # required by the one-hot encoding step below

def df2libffm(df, field_Category, field_Numeric=[]):
    libffm = []
    num_n = len(field_Numeric)
    csr = OneHotEncoder().fit_transform(df[field_Category])
    for i in range(len(csr.indptr) - 1):
        ls = []
        k = csr.indptr[i + 1] - csr.indptr[i]
        for j in range(k):
            ls.append((k - j + num_n, csr.indices[i * k + j] + num_n, 1))
        libffm.append(ls)
    if field_Numeric:
        for i in range(len(libffm)):
            for j in range(len(field_Numeric)):
                libffm[i].append((j + 1, j + 1, df[field_Numeric[j]].iloc[i]))
    return libffm
```
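A usage sketch with hypothetical column names (not from the original repo): each DataFrame row becomes a list of `(field, feature_index, value)` triples in libffm style, with numeric columns appended after the one-hot categorical ones.

```python
import pandas as pd

df = pd.DataFrame({'gender': ['m', 'f', 'm'],
                   'city':   ['a', 'a', 'b'],
                   'age':    [23, 31, 40]})
records = df2libffm(df, field_Category=['gender', 'city'], field_Numeric=['age'])
print(records[0])  # e.g. [(field, one_hot_index, 1), (field, one_hot_index, 1), (1, 1, 23)]
```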
#### File: 6_ModelEnsembling/3_Stacking/stacking_feature.py
```python
import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_val_predict
from sklearn.metrics import roc_auc_score

def stacking_feature(clf, X, y, nb_cv=3):
    """Seed perturbation (repeat CV with different random splits)
    tfidf_lr = make_pipeline(TfidfVectorizer(), LogisticRegression())
    tfidf_lr.fit(X, y)
    tfidf_lr.predict_proba(X)
    """
    pred_list_stack = []
    for i in range(nb_cv):
        pred_list = []
        auc_loss = []
        kf = StratifiedKFold(nb_cv, True).split(X, y)
        for i, (train_index, test_index) in enumerate(kf, 1):
            X_train, X_test, y_train, y_test = X[train_index], X[test_index], y[train_index], y[test_index]
            clf.fit(X_train, y_train)
            y_test_pred = clf.predict_proba(X_test)[:, 1]
            pred_list += y_test_pred.tolist()
            auc_loss.append(roc_auc_score(y_test, y_test_pred))
        pred_list_stack.append(pred_list)
        print("Auc-CV-Score: %0.5f (+/- %0.3f)" % (np.mean(auc_loss), np.std(auc_loss)))
    return np.column_stack(pred_list_stack)

# one-liner alternative for a single repeat:
# cross_val_predict(lr, X, y, cv=StratifiedKFold(5, True, random_state=2018+i), method='predict_proba')
```
#### File: Torch/02_TASK/numpy_GB.py
```python
import numpy as np
def gen_data(num=100):
"""""
y = 3*x1+4*x2
"""""
x1 = np.linspace(0, 9, num)
x2 = np.linspace(4, 13, num)
x = np.concatenate(([x1], [x2]), axis=0).T
y = np.dot(x, np.array([3, 4]).T) # y as a column vector
return x, y
def sgd(samples, y, step_size=0.01, max_iter_count=10000):
sample_num, dim = samples.shape
y = y.flatten()
w = np.ones((dim,), dtype=np.float32)
loss = 10
iter_count = 0
while loss > 0.001 and iter_count < max_iter_count:
loss = 0
error = np.zeros((dim,), dtype=np.float32)
for i in range(sample_num):
predict_y = np.dot(w.T, samples[i])
for j in range(dim):
error[j] += (y[i] - predict_y) * samples[i][j]
w[j] += step_size * error[j] / sample_num
# for j in range(dim):
# w[j] += step_size * error[j] / sample_num
for i in range(sample_num):
predict_y = np.dot(w.T, samples[i])
error = (1 / (sample_num * dim)) * np.power((predict_y - y[i]), 2)
loss += error
print("iter_count: ", iter_count, "the loss:", loss)
iter_count += 1
return w
if __name__ == '__main__':
x, y = gen_data()
w = sgd(x, y)
print(w)
```
#### File: Torch/05_TASK/l1l2.py
```python
import torch

def l1_penalty(var):
    return torch.abs(var).sum()

def l2_penalty(var):
    return torch.sqrt(torch.pow(var, 2).sum())

torch.optim.SGD  # the weight_decay argument corresponds to L2 regularization
```
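A minimal sketch (assumed model and data, not from the original file) of how these penalty helpers are typically added to a task loss; `lambda_l1` is just an illustrative coefficient:

```python
import torch
import torch.nn as nn

model = nn.Linear(10, 1)
x, y = torch.randn(32, 10), torch.randn(32, 1)
lambda_l1 = 1e-4

loss = nn.functional.mse_loss(model(x), y) + lambda_l1 * l1_penalty(model.weight)
loss.backward()  # gradients now include the L1 term
```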
#### File: keras/models/TextCNN.py
```python
from .BaseModel import BaseModel
from tensorflow.python.keras import Input, Model
from tensorflow.python.keras.layers import Dense, Concatenate, Dropout
from tensorflow.python.keras.layers import Conv1D, GlobalMaxPool1D, GlobalAvgPool1D
class TextCNN(BaseModel):
""" TextCNN:
1. embedding layers: embeddings = model.layers[0].get_weights()[0]
2. convolution layer,
3. max-pooling,
4. softmax layer.
With plenty of data: embeddings can simply be initialized randomly and then updated and learned while training the network on the corpus.
With little data: pre-train word vectors on an external corpus, feed them into the Embedding layer, and initialize embeddings with the pretrained matrix (by setting weights=[embedding_matrix]).
Static mode: embeddings are not updated during training. This is essentially transfer learning; static word vectors work well, especially when the target-domain data is small (by setting trainable=False).
Non-static mode: embeddings are updated and fine-tuned during training, which speeds up convergence (by setting trainable=True).
"""
def __init__(self, max_tokens, maxlen=128, embedding_size=None, num_class=1, kernel_size_list=(3, 4, 5),
weights=None):
"""
:param embedding_size: may be omitted for category/entity embeddings
model = TextCNN(max_token, maxlen, num_class=1)()
model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
model.fit_generator(DataIter(X, y), epochs=5)
"""
super().__init__(max_tokens, maxlen, embedding_size, num_class, weights)
self.kernel_size_list = kernel_size_list
def get_model(self):
input = Input(self.maxlen)
# Embedding part can try multichannel as same as origin paper
embedding = self.embedding_layer(input)
convs = []
for kernel_size in self.kernel_size_list:
c = Conv1D(128, kernel_size, activation='relu')(embedding) # convolution
# c = Dropout(0.5)(c)
p = GlobalMaxPool1D()(c) # max pooling
# p = GlobalAvgPool1D()(c)
convs.append(p)
x = Concatenate()(convs)
output = Dense(self.num_class, activation=self.last_activation)(x)
model = Model(inputs=input, outputs=output)
return model
if __name__ == '__main__':
TextCNN(1000, 10)()
```
#### File: keras/utils/ConfigKeras.py
```python
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
class ConfigKeras(object):
"""
https://www.cnblogs.com/wuliytTaotao/p/10883749.html
"""
def __init__(self, seed=2019):
self._seed = seed
def set_seed(self):
os.environ["PYTHONHASHSEED"] = '0'
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
random.seed(self._seed)
np.random.seed(self._seed)
# config = tf.estimator.RunConfig(tf_random_seed=234)
# tf.contrib.learn.RunConfig(tf_random_seed=234)
tf.set_random_seed(self._seed)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) # K.get_session().graph.as_default()
K.set_session(sess)
```
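A usage sketch (not in the original file): seed before any layers are created so that weight initialization and shuffling are reproducible; note the class also hides the GPU via `CUDA_VISIBLE_DEVICES=-1` and pins TensorFlow to single-threaded execution.

```python
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

ConfigKeras(seed=2019).set_seed()   # seed before building any layers

model = Sequential([Dense(1, input_shape=(4,))])
model.compile('adam', 'mse')        # repeated runs should now start from identical weights
```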
#### File: utils/models/BILSTM.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class Config(object):
# Data
class_num = 2
# Embedding
pretrained_embedding = None # pretrained_weight
vocab_dim = 128 if pretrained_embedding is None else pretrained_embedding.shape[1]
vocab_size = 10000
# RNN
rnn_layers_num = 2
rnn_dropout = 0
# Linear
fc_dropout = 0
opt = Config()
class BiLSTM(nn.Module):
def __init__(self):
super(BiLSTM, self).__init__()
# self.embed = nn.Embedding(V, D, max_norm=config.max_norm)
self.embed = nn.Embedding(opt.vocab_size, opt.vocab_dim)
# pretrained embedding
if opt.pretrained_embedding:
self.embed.weight.data.copy_(opt.pretrained_embedding)
# self.embed.weight.requires_grad = False # freeze the word vectors
self.bilstm = nn.LSTM(
opt.vocab_dim,
opt.vocab_dim // 2,
dropout=opt.rnn_dropout,
num_layers=opt.rnn_layers_num,
batch_first=True, # input & output will have batch size as the first dimension, e.g. (batch, time_step, input_size)
bidirectional=True)
print(self.bilstm)
self.fc1 = nn.Linear(opt.vocab_dim // 2 * 2, opt.vocab_dim // 2)
self.fc2 = nn.Linear(opt.vocab_dim // 2, opt.class_num)
self.dropout = nn.Dropout(opt.fc_dropout)
def forward(self, x):
embed = self.embed(x)
x = embed.view(len(x), embed.size(1), -1)
# x shape (batch, time_step, input_size)
# r_out shape (batch, time_step, output_size)
# h_n shape (n_layers, batch, hidden_size)
# h_c shape (n_layers, batch, hidden_size)
r_out, (h_n, h_c) = self.bilstm(x)
r_out = F.relu(r_out)
# r_out = F.max_pool1d(r_out, r_out.size(2)).squeeze(2)
# y = self.fc1(r_out)
# y = self.fc2(y)
# choose r_out at the last time step
y = self.fc1(r_out[:, -1, :])
y = self.fc2(y)
return torch.softmax(y, dim=-1)  # torch.softmax requires an explicit dim
```
#### File: algo_ml/cv/CatBoostClassifierCV.py
```python
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from catboost import CatBoostClassifier
from sklearn.metrics import roc_auc_score
import os
cloudml = os.path.exists('/fds')
class CatBoostClassifierCV(object):
"""cross_val_predict"""
def __init__(self, params=None, cv=5, random_state=None, n_repeats=None):
self.clf = CatBoostClassifier()
if params:
self.clf.set_params(**params)
if n_repeats:
self._kf = RepeatedStratifiedKFold(cv, True, random_state)
self._num_preds = cv * n_repeats
else:
self._kf = StratifiedKFold(cv, True, random_state)
self._num_preds = cv
def fit(self, X, y, X_test, feval=roc_auc_score, cat_features=None, sample_weight=None, verbose=100,
early_stopping_rounds=100, plot=False, silent=None,
logging_level=None, column_description=None, save_snapshot=None,
snapshot_file='/fds/data' if cloudml else None, snapshot_interval=None,
init_model=None):
"""输入数组"""
self.oof_train = np.zeros(len(X))
self.oof_test = np.zeros((len(X_test), self._num_preds))
for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
if verbose:
print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
X_train, y_train = X[train_index], y[train_index]
X_valid, y_valid = X[valid_index], y[valid_index]
# eval_set = [(X_train, y_train), (X_valid, y_valid)]
########################################################################
self.clf.fit(X_train, y_train,
cat_features=cat_features,
sample_weight=sample_weight,
use_best_model=True,
eval_set=(X_valid, y_valid),
verbose=verbose,
logging_level=logging_level,
plot=plot,
column_description=column_description,
silent=silent,
early_stopping_rounds=early_stopping_rounds,
save_snapshot=save_snapshot,
snapshot_file=snapshot_file,
snapshot_interval=snapshot_interval,
init_model=init_model)
self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]
self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]
########################################################################
# output the test-set OOF predictions
self.oof_test_rank = pd.DataFrame(self.oof_test).rank().mean(1) / len(self.oof_test)
self.oof_test = self.oof_test.mean(1)
# compute the OOF score on the training set
if feval:
score = feval(y, self.oof_train)
print(f"\n\033[94mCV Score: {score} ended at {time.ctime()}\033[0m")
return score
def oof_save(self, file='./oof_train_and_test.csv'):
assert isinstance(file, str)
_ = np.append(self.oof_train, self.oof_test)
pd.DataFrame(_, columns=['oof_train_and_test']).to_csv(file, index=False)  # columns must be list-like
if __name__ == "__main__":
from sklearn.datasets import make_classification
X, y = make_classification()
X_test, _ = make_classification()
clf = CatBoostClassifierCV({'n_estimators': 100, 'eval_metric': 'AUC'})
clf.fit(X, y, X_test, verbose=50, plot=True)
```
#### File: algo_ml/cv/FMModelCV.py
```python
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from sklearn.metrics import roc_auc_score
from xlearn import FMModel, FMModel
class FMModelCV(object):
"""cross_val_predict"""
def __init__(self, params=None, cv=5, cv_seed=None, n_repeats=None):
self.clf = FMModel()
self.cv = cv
if params:
self.clf.set_params(**params)
if n_repeats:
self._kf = RepeatedStratifiedKFold(cv, True, cv_seed)
self._num_preds = cv * n_repeats
else:
self._kf = StratifiedKFold(cv, True, cv_seed)
self._num_preds = cv
def fit(self, X, y, X_test=None, feval=roc_auc_score, fix_valid_index=None, fields=None,
is_lock_free=True, is_instance_norm=True, is_quiet=False, verbose=1):
if X_test is None:
X_test = X[:1]
self.oof_train = np.zeros(len(X))
self.oof_test = np.zeros((len(X_test), self._num_preds))
for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
if verbose:
print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
# restrict the validation set used for early stopping
if fix_valid_index is not None:
valid_index = list(set(fix_valid_index) & set(valid_index))
X_train, y_train = X[train_index], y[train_index]
X_valid, y_valid = X[valid_index], y[valid_index]
# eval_set = [(X_train, y_train), (X_valid, y_valid)]
########################################################################
self.clf.fit(
X_train,
y_train,
fields,
is_lock_free,
is_instance_norm,
[X_valid, y_valid],
is_quiet
)
self.oof_train[valid_index] = self.clf.predict(X_valid)
self.oof_test[:, n_fold] = self.clf.predict(X_test)
########################################################################
# output the test-set OOF predictions
self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(1) / len(self.oof_test)).values
self.oof_test = self.oof_test.mean(1)
assert len(X) == len(self.oof_train)
assert len(X_test) == len(self.oof_test)
# compute the OOF score on the training set
if feval:
self.oof_score = feval(y, self.oof_train)
print("\n\033[94mScore Info:\033[0m")
print(f"\033[94m {self.cv:>2} CV: {self.oof_score:.6f}\033[0m")
# _ = np.array(self.best_info['best_iteration'])
# print(f"\033[94m Iter: {_.mean():.0f} +/- {_.std():.0f}\033[0m")
#
# _ = np.array(self.best_info['best_score_valid'])
# print(f"\033[94m Valid: {_.mean():.6f} +/- {_.std():.6f} \033[0m\n")
return self.oof_score
@property
def oof_train_and_test(self):
return np.r_[self.oof_train, self.oof_test]
if __name__ == "__main__":
from sklearn.datasets import make_classification
X, y = make_classification(1000, random_state=666)
clf = FMModelCV()
clf.fit(X, y)
```
#### File: algo_ml/cv/XGBClassifierCV.py
```python
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold, KFold, cross_val_predict, cross_validate
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier
class XGBClassifierCV(object):
"""cross_val_predict"""
def __init__(self, params=None, cv=5, random_state=None, n_repeats=None):
self.clf = XGBClassifier()
self.cv = cv
if params:
self.clf.set_params(**params)
if n_repeats:
self._kf = RepeatedStratifiedKFold(cv, True, random_state)
self._num_preds = cv * n_repeats
else:
self._kf = StratifiedKFold(cv, True, random_state)
self._num_preds = cv
def fit(self, X, y, X_test=None, feval=roc_auc_score, sample_weight=None, eval_metric='auc',
early_stopping_rounds=300, verbose=100, xgb_model=None, sample_weight_eval_set=None, callbacks=None):
"""输入数组"""
self.best_info = {}
if X_test is None:
X_test = X[:1]
self.oof_train = np.zeros(len(X))
self.oof_test = np.zeros((len(X_test), self._num_preds))
for n_fold, (train_index, valid_index) in enumerate(self._kf.split(X, y)):
if verbose:
print("\033[94mFold %s started at %s\033[0m" % (n_fold + 1, time.ctime()))
X_train, y_train = X[train_index], y[train_index]
X_valid, y_valid = X[valid_index], y[valid_index]
eval_set = [(X_train, y_train), (X_valid, y_valid)]
########################################################################
self.clf.fit(X_train, y_train, sample_weight, eval_set=eval_set, eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds, verbose=verbose,
xgb_model=xgb_model, sample_weight_eval_set=sample_weight_eval_set,
callbacks=callbacks)
self.oof_train[valid_index] = self.clf.predict_proba(X_valid)[:, 1]
self.oof_test[:, n_fold] = self.clf.predict_proba(X_test)[:, 1]
# best info
# print(self.clf.evals_result_)
self.best_info.setdefault('best_iteration', []).append(self.clf.best_iteration)
self.best_info.setdefault('best_score_valid', []).append(self.clf.best_score)
########################################################################
# output the test-set OOF predictions
self.oof_test_rank = (pd.DataFrame(self.oof_test).rank().mean(1) / len(self.oof_test)).values
self.oof_test = self.oof_test.mean(1)
assert len(X) == len(self.oof_train)
assert len(X_test) == len(self.oof_test)
# compute the OOF score on the training set
if feval:
self.oof_score = feval(y, self.oof_train)
print("\n\033[94mScore Info:\033[0m")
print(f"\033[94m {self.cv:>2} CV: {self.oof_score:.6f}\033[0m")
_ = np.array(self.best_info['best_iteration'])
print(f"\033[94m Iter: {_.mean():.0f} +/- {_.std():.0f}\033[0m")
_ = np.array(self.best_info['best_score_valid'])
print(f"\033[94m Valid: {_.mean():.6f} +/- {_.std():.6f} \033[0m\n")
return self.oof_score
def oof_submit(self, ids, pred_ranking=False, file=None, preds=None):
"""preds藏分用"""
if file is None:
file = f'submit_cv{self.cv}_{self.oof_score}.csv'
print(f'Save {file} ...')
if preds is None:
preds = self.oof_test_rank if pred_ranking else self.oof_test
if not isinstance(ids, pd.DataFrame):
ids = pd.DataFrame(ids)
ids.assign(preds=preds).to_csv(file, index=False, header=False)
@property
def oof_train_and_test(self):
return np.r_[self.oof_train, self.oof_test]
def oof_save(self, file='./oof_train_and_test.csv'):
pd.DataFrame(self.oof_train_and_test, columns=['oof_train_and_test']).to_csv(file, index=False)
def plot_feature_importances(self, feature_names=None, topk=20, figsize=(10, 6), pic_name=None):
columns = ['Importances', 'Features']
importances = self.clf.feature_importances_.tolist()
if feature_names is None:
feature_names = list(map(lambda x: f'F_{x}', range(len(importances))))
_ = list(zip(importances, feature_names))
df = pd.DataFrame(_, columns=columns).sort_values('Importances', 0, False)
plt.figure(figsize=figsize)
sns.barplot(*columns, data=df[:topk])
plt.title('Features Importances\n')
plt.tight_layout()
if pic_name is None:
plt.savefig(f'importances_{self.oof_score}.png')
if __name__ == "__main__":
from sklearn.datasets import make_classification
X, y = make_classification()
X_test, _ = make_classification()
clf = XGBClassifierCV({'n_estimators': 1000})
clf.fit(X, y, X_test)
```
#### File: algo_ml/explainer/ExplainerTabular.py
```python
class ExplainerTabular(object):
def __init__(self):
pass
```
#### File: algo_ml/explainer/ExplainerText.py
```python
from nlp.models import BaselineBow
from sklearn.linear_model import LogisticRegression
from lime.lime_text import LimeTextExplainer
# import eli5
# from eli5.lime import TextExplainer
#
# te = TextExplainer(random_state=42)
# te.fit(doc, pipe.predict_proba)
# te.show_prediction(target_names=twenty_train.target_names)
class ExplainerText(object):
"""
X = df.review.astype(str).map(lambda x: ' '.join(jieba.cut(x)))
y = df.label
enlp = ExplainNLP()
enlp.fit(X, y)
enlp.explain(X[0])
"""
def __init__(self, estimator=LogisticRegression(), class_names=None):
self._baseline = BaselineBow(estimator)()
self._explainer = LimeTextExplainer(verbose=True, class_names=class_names)
def fit(self, X, y):
self._baseline.fit(X, y)
return self._baseline
def explain(self, sentence, num_features=6):
"""
:param sentence: tokenized text joined by single spaces
:param num_features:
:return:
"""
exp = self._explainer.explain_instance(
sentence, self._baseline.predict_proba, num_features=num_features)
exp.show_in_notebook(text=1 if len(sentence) < 256 else 0)
return exp
```
#### File: features/agg/Funcs.py
```python
import numpy as np
class Funcs(object):
"""x: pd.Series"""
def __init__(self):
self.num = ['q1', 'q3', 'iqr', 'kurt', 'cv', 'p2p']
self.cat = ['mode', 'nunique_perc']
@property
def num_funcs(self):
return [self.__getattribute__(func_name) for func_name in self.num]
@property
def cat_funcs(self):
return [self.__getattribute__(func_name) for func_name in self.cat]
# cat funcs
def mode(self, x):
return x.value_counts().index[0]
def nunique_perc(self, x):
return x.nunique() / x.count()
# num funcs
def q1(self, x):
return x.quantile(0.25)
def q3(self, x):
return x.quantile(0.75)
def iqr(self, x):
return x.quantile(0.75) - x.quantile(0.25)
def p2p(self, x):
return np.ptp(x)
def kurt(self, x):
return x.kurt()
def cv(self, x):
        return x.std() / (x.mean() + 1e-8)  # coefficient of variation
def count_nonzero(self, x):
return np.count_nonzero(x)
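if __name__ == '__main__':
    # Hypothetical usage sketch (the toy frame below is made up): the collected
    # callables are meant to be plugged into a pandas groupby-aggregation.
    import pandas as pd
    funcs = Funcs()
    df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                       'num': [1.0, 2.0, 3.0, 5.0],
                       'cat': ['x', 'x', 'y', 'z']})
    print(df.groupby('key').agg({'num': funcs.num_funcs, 'cat': funcs.cat_funcs}))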
```
#### File: features/cat/VecFeats.py
```python
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from tqdm import tqdm
class VecFeats(object):
"""
tfv = TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1)
"""
def __init__(self, df, cat_feats, vectorizer=TfidfVectorizer(ngram_range=(1, 1), max_features=1000)):
self.df = df
self.cat_feats = cat_feats
self.vectorizer = vectorizer
def get_vectors(self):
return self.vectorizer.fit_transform(self.corpus)
@property
def corpus(self):
df = self.df[self.cat_feats]
for idx, feat in tqdm(enumerate(self.cat_feats)):
df[feat] = '%s_' % idx + df[feat].astype(str)
return df.apply(lambda x: ' '.join(x), 1)
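if __name__ == '__main__':
    # Hypothetical usage sketch: the column names and toy frame below are made up.
    import pandas as pd
    df = pd.DataFrame({'city': ['bj', 'sh', 'bj'], 'brand': ['a', 'b', 'a']})
    vf = VecFeats(df, ['city', 'brand'])
    print(vf.get_vectors().shape)  # sparse TF-IDF matrix over prefixed category tokens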
```
#### File: features/transformer/RankEncoder.py
```python
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class RankEncoder(BaseEstimator, TransformerMixin):
def __init__(self, method='average', na_option='keep'):
"""
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
"""
self.method = method
self.na_option = na_option
    def fit(self, X, y=None):
        return self
    def transform(self, y):
        """Values unseen at training time (or too rare) could be filled with 0 via the commented fillna."""
        return pd.Series(y).rank(method=self.method, na_option=self.na_option)  # .fillna(0)
if __name__ == '__main__':
import numpy as np
s = ['a', 'a', 'b', 'b', 'c'] + [np.nan] * 6
re = RankEncoder(na_option='keep')
print(re.transform(s))
```
#### File: features/ts/DateTimeFeats.py
```python
import pandas as pd
from tqdm import tqdm
from datetime import timedelta
tqdm.pandas()
class DateTimeFeats(object):
"""
    pandas_utils: date/time feature engineering.
    Common input formats:
        1. unix timestamp
        2. datetime string
    # quite robust: values that cannot be parsed become NaT
    pd.to_datetime(ts, 'coerce', unit='s', infer_datetime_format=True)
    """
def __init__(self, include_feats=None):
"""
        :param include_feats: defaults to
            ("year", "quarter", "month", "day", "hour", "minute", "week", "weekday", "weekofyear")
            weekofyear == week?
        TODO: + "DayOfWeekInMonth": which week of the month a date falls in
            (get the first and last day of every month of a year in python)
"""
self.time_granularity = ("year", "quarter", "month",
"day", "hour", "minute",
"week", "weekday", "weekofyear")
self.feats = include_feats if include_feats else self.time_granularity
def transform(self, s: pd.Series, add_prefix=None):
if s.name is None:
s.name = 'time_str'
if add_prefix is None:
add_prefix = f"{s.name}_"
feats = self.feats
_dtype = s.dtypes.__str__()
        if _dtype.__contains__('int') or _dtype.__contains__('float'):  # timestamp: 10 digits = seconds, 13 digits = milliseconds
print("infer_datetime_format: timestamp2date")
ts = self.timestamp2date(s)
else:
print('infer_datetime_format: dateStr2date')
ts = self.dateStr2date(s)
_ = ts.progress_map(lambda t: list(self._func(t, feats)))
df_ts = pd.DataFrame(_.tolist(), columns=feats).add_prefix(add_prefix)
df_ts.insert(0, f'{s.name}2date', ts)
return df_ts
def _func(self, t, feats):
for feat in feats:
_ = t.__getattribute__(feat)
if callable(_):
yield _()
else:
yield _
def timestamp2date(self, ts):
return pd.to_datetime(ts, 'coerce', unit='s', infer_datetime_format=True)
def dateStr2date(self, ts):
try:
_ = ts.astype('datetime64[ns]')
except Exception as e:
print("astype('datetime64[ns]'): %s" % e)
_ = pd.to_datetime(ts, 'coerce', infer_datetime_format=True)
return _
def DayOfWeekInMonth(self, t):
"""
        Return which week of its month the given day falls in.
        Monday is treated as the first day of the week.
        Idea: take the week-of-year of the given day and of the 1st of its month, then use their difference.
"""
b = int((t - timedelta(t.day - 1)).strftime("%W"))
e = int(t.strftime("%W"))
return e - b + 1
if __name__ == '__main__':
    ts = pd.Series([pd.Timestamp.today()] * 10)
print(DateTimeFeats().transform(ts))
```
#### File: tql/algo_ml/ftrl.py
```python
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
import gzip
import random
import json
import argparse
class FTRLProximal(object):
"""
FTRL Proximal engineer project with logistic regression
Reference:
https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/41159.pdf
"""
def __init__(self, alpha, beta, L1, L2, D,
interaction=False, dropout=1.0,
dayfeature=True,
device_counters=False):
# parameters
self.alpha = alpha
self.beta = beta
self.L1 = L1
self.L2 = L2
self.dayfeature = dayfeature
self.device_counters = device_counters
# feature related parameters
self.D = D
self.interaction = interaction
self.dropout = dropout
# model
self.n = [0.] * D
self.z = [0.] * D
self.w = [0.] * D
def _indices(self, x):
'''
A helper generator that yields the indices in x
The purpose of this generator is to make the following
code a bit cleaner when doing feature interaction.
'''
for i in x:
yield i
if self.interaction:
D = self.D
L = len(x)
for i in range(1, L): # skip bias term, so we start at 1
for j in range(i + 1, L):
# one-hot encode interactions with hash trick
yield abs(hash(str(x[i]) + '_' + str(x[j]))) % D
def predict(self, x, dropped=None):
"""
use x and computed weight to get predict
:param x:
:param dropped:
:return:
"""
# wTx is the inner product of w and x
wTx = 0.
for j, i in enumerate(self._indices(x)):
if dropped is not None and dropped[j]:
continue
wTx += self.w[i]
if dropped is not None:
wTx /= self.dropout
# bounded sigmoid function, this is the probability estimation
return 1. / (1. + exp(-max(min(wTx, 35.), -35.)))
def update(self, x, y):
"""
update weight and coordinate learning rate based on x and y
:param x:
:param y:
:return:
"""
ind = [i for i in self._indices(x)]
if self.dropout == 1:
dropped = None
else:
dropped = [random.random() > self.dropout for i in range(0, len(ind))]
p = self.predict(x, dropped)
# gradient under logloss
g = p - y
# update z and n
for j, i in enumerate(ind):
# implement dropout as overfitting prevention
if dropped is not None and dropped[j]:
continue
            g_i = g  # feature values are 1 for these one-hot indices, so the per-feature gradient is just g
sigma = (sqrt(self.n[i] + g_i * g_i) - sqrt(self.n[i])) / self.alpha
self.z[i] += g_i - sigma * self.w[i]
self.n[i] += g_i * g_i
sign = -1. if self.z[i] < 0 else 1. # get sign of z[i]
# build w on the fly using z and n, hence the name - lazy weights -
if sign * self.z[i] <= self.L1:
# w[i] vanishes due to L1 regularization
self.w[i] = 0.
else:
# apply prediction time L1, L2 regularization to z and get
self.w[i] = (sign * self.L1 - self.z[i]) \
/ ((self.beta + sqrt(self.n[i])) / self.alpha + self.L2)
def save_model(self, save_file):
"""
        Save the weight data to a local json file
:param save_file:
:return:
"""
with open(save_file, "w") as f:
w = {k: v for k, v in enumerate(self.w) if v != 0}
z = {k: v for k, v in enumerate(self.z) if v != 0}
n = {k: v for k, v in enumerate(self.n) if v != 0}
data = {
'w': w,
'z': z,
'n': n
}
json.dump(data, f)
def load_weight(self, model_file, D):
"""
        load weight data
:param model_file:
:return:
"""
with open(model_file, "r") as f:
data = json.load(f)
self.w = data.get('w', [0.] * D)
self.z = data.get('z', [0.] * D)
self.n = data.get('n', [0.] * D)
@staticmethod
def loss(y, y_pred):
"""
log loss for LR model
:param y:
:param y_pred:
:return:
"""
p = max(min(y_pred, 1. - 10e-15), 10e-15)
return -log(p) if y == 1. else -log(1. - p)
def data(f_train, D, dayfilter=None, dayfeature=True, counters=False):
''' GENERATOR: Apply hash-trick to the original csv row
and for simplicity, we one-hot-encode everything
INPUT:
path: path to training or testing file
D: the max index that we can hash to
YIELDS:
ID: id of the instance, mainly useless
x: a list of hashed and one-hot-encoded 'indices'
we only need the index since all values are either 0 or 1
y: y = 1 if we have a click, else we have y = 0
'''
device_ip_counter = {}
device_id_counter = {}
for t, row in enumerate(DictReader(f_train)):
# process id
ID = row['id']
del row['id']
# process clicks
y = 0.
if 'click' in row:
if row['click'] == '1':
y = 1.
del row['click']
# turn hour really into hour, it was originally YYMMDDHH
date = row['hour'][0:6]
row['hour'] = row['hour'][6:]
        if dayfilter is not None and date not in dayfilter:
continue
if dayfeature:
# extract date
row['wd'] = str(int(date) % 7)
row['wd_hour'] = "%s_%s" % (row['wd'], row['hour'])
if counters:
d_ip = row['device_ip']
d_id = row["device_id"]
try:
device_ip_counter[d_ip] += 1
device_id_counter[d_id] += 1
except KeyError:
device_ip_counter[d_ip] = 1
device_id_counter[d_id] = 1
row["ipc"] = str(min(device_ip_counter[d_ip], 8))
row["idc"] = str(min(device_id_counter[d_id], 8))
# build x
x = [0] # 0 is the index of the bias term
for key in row:
value = row[key]
# one-hot encode everything with hash trick
index = abs(hash(key + '_' + value)) % D
x.append(index)
yield t, ID, x, y
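if __name__ == '__main__':
    # Hypothetical training-loop sketch: the CSV path and hyper-parameters are
    # placeholders; the file is assumed to follow the Avazu-style schema
    # ('id', 'click', 'hour', 'device_ip', 'device_id', ...) expected by data().
    D = 2 ** 20
    model = FTRLProximal(alpha=0.1, beta=1.0, L1=1.0, L2=1.0, D=D, dropout=1.0)
    total_loss, count = 0., 0
    with open('train_sample.csv') as f_train:
        for t, ID, x, y in data(f_train, D):
            p = model.predict(x)
            total_loss += model.loss(y, p)
            count += 1
            model.update(x, y)  # online update after scoring (progressive validation)
    if count:
        print('average logloss: %.5f' % (total_loss / count))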
```
#### File: models/classifier/bayes_opt_xgb.py
```python
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import xgboost as xgb
from bayes_opt import BayesianOptimization
from bayes_opt.observer import JSONLogger
from bayes_opt.event import Events
class BayesOptXGB(object):
def __init__(self, X, y, topk=10, missing=None, metric='auc', objective='binary:logistic', fix_params={},
n_jobs=8, opt_seed=None):
"""
:param objective: 'binary:logistic', 'multi:softmax', 'reg:linear'
"""
self.data = xgb.DMatrix(X, y, missing=missing)
self.topk = topk
self.metric = metric
self.objective = objective
        self.fix_params = fix_params  # fixed params excluded from tuning
self.opt_seed = opt_seed
self.n_jobs = n_jobs
self.params_ls = []
self.params_ls_sk = []
self.params_best = {}
self.params_best_sk = {}
self.params_opt_df = None
self._iter_ls = []
if self.fix_params:
print('Fixed Params: \033[94m%s\033[0m\n' % self.fix_params)
@property
def best_model(self):
if self.params_best:
return xgb.train(dtrain=self.data, **self.params_best)
else:
print('\033[94m%s\033[0m\n' % "Please Run !")
def run(self, n_iter=5, save_log=False):
BoParams = {
'max_depth': (5, 16),
'min_child_weight': (1, 10),
'gamma': (0, 1),
'subsample': (0.6, 1),
'colsample_bytree': (0.6, 1),
'reg_alpha': (0, 1),
'reg_lambda': (0, 1),
}
optimizer = BayesianOptimization(self.__evaluator, BoParams, self.opt_seed)
if save_log:
logger = JSONLogger(path="./opt_xgb_logs.json")
optimizer.subscribe(Events.OPTMIZATION_STEP, logger)
if self.fix_params:
optimizer.set_bounds({k: (v, v) for k, v in self.fix_params.items()})
# optimizer.probe(
# {'max_depth': 7,
# 'min_child_weight': 1,
# 'gamma': 0,
# 'subsample': 0.8,
# 'colsample_bytree': 0.8,
# 'reg_alpha': 0.01,
# 'reg_lambda': 1})
gp_params = {"alpha": 1e-5, "n_restarts_optimizer": 3}
optimizer.maximize(init_points=3, n_iter=n_iter, acq='ucb', kappa=2.576, xi=0.0, **gp_params)
self.__get_params(optimizer)
def __evaluator(self, max_depth, gamma, min_child_weight, subsample, colsample_bytree,
reg_alpha, reg_lambda):
self.__params_sk = dict(
silent=True,
booster='gbtree',
objective=self.objective,
max_depth=int(max_depth),
learning_rate=0.01,
gamma=gamma, # min_split_gain
min_child_weight=min_child_weight,
subsample=subsample,
            colsample_bylevel=0.8,  # column subsample ratio per tree level
colsample_bytree=colsample_bytree,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=1,
random_state=0,
n_jobs=self.n_jobs
)
params = self.__params_sk.copy()
params['eta'] = params.pop('learning_rate')
params['alpha'] = params.pop('reg_alpha')
params['lambda'] = params.pop('reg_lambda')
params['eval_metric'] = self.metric
_ = xgb.cv(params,
self.data,
num_boost_round=3600,
nfold=5,
early_stopping_rounds=100,
show_stdv=False,
stratified=False if 'reg' in self.objective else True,
as_pandas=False)['test-%s-mean' % self.metric]
self._iter_ls.append(len(_))
return -_[-1] if 'reg' in self.objective else _[-1]
def __get_params(self, optimizer):
self.params_opt_df = (
pd.concat([pd.DataFrame({'iter': self._iter_ls}), pd.DataFrame(optimizer.res)], 1)
.sort_values('target', ascending=False)
.reset_index(drop=True)[:self.topk])
for _, (i, p, _) in self.params_opt_df.iterrows():
params_sk = {**self.__params_sk, **p, **{'n_estimators': i}}
params_sk['max_depth'] = int(params_sk['max_depth'])
params_sk = {k: float('%.3f' % v) if isinstance(v, float) else v for k, v in params_sk.items()}
self.params_ls_sk.append(params_sk)
params = params_sk.copy()
params['eta'] = params.pop('learning_rate')
params['alpha'] = params.pop('reg_alpha')
params['lambda'] = params.pop('reg_lambda')
num_boost_round = params.pop('n_estimators')
self.params_ls.append({'params': params, 'num_boost_round': num_boost_round})
self.params_best = self.params_ls[0]
self.params_best_sk = self.params_ls_sk[0]
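if __name__ == '__main__':
    # Hypothetical usage sketch on a toy binary task; assumes a bayes_opt
    # version compatible with the imports above.
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=500, random_state=0)
    opt = BayesOptXGB(X, y, metric='auc', objective='binary:logistic', n_jobs=4)
    opt.run(n_iter=2)
    print(opt.params_best_sk)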
```
#### File: algo_ml/opt_params/CatBoostOptimizer.py
```python
import numpy as np
import catboost
from catboost import CatBoostClassifier
from sklearn.model_selection import cross_val_score, StratifiedKFold
from .Optimizer import Optimizer
class CatBoostOptimizer(Optimizer):
def __init__(self, X, y, cv=5, cv_seed=None, params_bounds=None, use_gpu=1):
super().__init__(X, y, cv, cv_seed, params_bounds)
self.params_bounds = params_bounds if params_bounds else \
dict(n_estimators=1000,
loss_function="Logloss",
eval_metric="AUC",
learning_rate=(0.001, 1),
l2_leaf_reg=(0, 100),
random_seed=666,
od_type="Iter",
depth=(2, 16),
early_stopping_rounds=300,
verbose=False,
border_count=64,
has_time=True)
if use_gpu:
            self.workers = 1  # GPU cross-validation can only run in a single process
self.params_bounds.update({'task_type': 'GPU'})
def objective(self, **params):
"""重写目标函数"""
# 纠正参数类型
params = self._round_params(params)
_params = {**self.params_bounds, **params}
# 核心逻辑 # TODO: 原生CV
self.clf = CatBoostClassifier(**_params)
scores = cross_val_score(self.clf, self.X, self.y, scoring='roc_auc',
cv=StratifiedKFold(self.cv, True, self.cv_seed))
return scores.mean()
```
#### File: algo_ml/opt_params/ParamsBounds.py
```python
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RFC
class ParamsBounds(object):
"""https://github.com/Jie-Yuan/optuna/blob/master/examples/lightgbm_simple.py"""
@property
def lgb(self):
"""https://www.cnblogs.com/wzdLY/p/9867719.html"""
pass
"""https://github.com/fmfn/BayesianOptimization/blob/master/examples/sklearn_example.py"""
@property
def svc(self):
estimator = SVC(random_state=123456)
pbounds = {"C": (0.001, 100), "gamma": (0.0001, 0.1)}
return estimator, pbounds
@property
def rfc(self):
estimator = RFC(random_state=123456, n_jobs=-1)
pbounds = {
"criterion": ('gini', 'entropy'),
"n_estimators": (32, 256),
"min_samples_split": (2, 25),
"max_features": (0.01, 1),
}
return estimator, pbounds
```
#### File: algo_nlp/api/baidu_post.py
```python
import json
import urllib.request
import requests
class BaiduPost(object):
"""
    Knowledge graph entity annotation: https://aip.baidubce.com/rpc/2.0/kg/v1/cognitive/entity_annotation
    Smart couplets: https://aip.baidubce.com/rpc/2.0/nlp/v1/couplets
        { "text": "百度", "index": 0}
    Smart poetry: https://aip.baidubce.com/rpc/2.0/nlp/v1/poem
        { "text": "百度", "index": 0}  # index defaults to 0, i.e. the first poem; increase it by 1 for a new one, and after a certain number earlier poems are returned again
"""
def __init__(self, api_key='<KEY>', secret_key='<KEY>'):
self.access_token = self._get_access_token(api_key, secret_key)
def predict(self, input_text, url):
url = url + '?charset=UTF-8&access_token=' + self.access_token
# the input is json format
# input_text = {'data': text}
r = requests.post(url, json=input_text)
return r.json()
def _get_access_token(self, api_key, secret_key):
host = f'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={api_key}&client_secret={secret_key}'
request = urllib.request.Request(host)
request.add_header('Content-Type', 'application/json; charset=UTF-8')
response = urllib.request.urlopen(request)
content = response.read()
return json.loads(content)['access_token']
if __name__ == '__main__':
from pprint import pprint
api = BaiduPost()
pprint(api.predict({'data': '周杰伦'},
'https://aip.baidubce.com/rpc/2.0/kg/v1/cognitive/entity_annotation'))
```
#### File: tql/algo_nlp/TencentWord2Vec.py
```python
from tqdm import tqdm
import pymongo
from concurrent.futures import ThreadPoolExecutor
from scipy.spatial.distance import pdist
from sklearn.metrics.pairwise import cosine_similarity
class TencentWord2Vec(object):
def __init__(self):
self.db = pymongo.MongoClient().ChineseEmbedding.TencentWord2Vec
def __getitem__(self, items):
if isinstance(items, str):
return self.get_vector([items])[0]
else:
return self.get_vector(items)
def cosine(self, w1, w2):
return 1 - pdist(self.get_vector((w1, w2)), 'cosine')[0]
def get_vector(self, words):
"""
from sklearn.metrics.pairwise import cosine_similarity
>>> a=[[1,3,2],[2,2,1]]
>>> cosine_similarity(a)
"""
with ThreadPoolExecutor(8 if len(words) > 8 else len(words)) as pool:
vecs = pool.map(self.__func, words)
return list(vecs)
def __insert_word2vec(self):
self.db.delete_many({})
for line in self.__reader():
_ = self.db.insert_one(line)
def __func(self, word):
return self.db.find_one({'word': word})['vector']
def __reader(self):
with open("/home/yuanjie/下载/Tencent_AILab_ChineseEmbedding/Tencent_AILab_ChineseEmbedding.txt") as f:
for idx, line in tqdm(enumerate(f), 'Loading ...'):
ws = line.strip().split(' ')
if idx:
yield {'word': ws[0], 'vector': [float(i) for i in ws[1:]]}
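if __name__ == '__main__':
    # Hypothetical usage sketch: assumes a local MongoDB whose
    # ChineseEmbedding.TencentWord2Vec collection has already been filled
    # (e.g. via the private __insert_word2vec helper above).
    w2v = TencentWord2Vec()
    print(len(w2v['北京']))           # embedding vector looked up from MongoDB
    print(w2v.cosine('北京', '上海'))  # cosine similarity between two words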
```
#### File: algo_nlp/utils/VecQuery.py
```python
import numpy as np
import nmslib
from collections import OrderedDict
class VecQuery(object):
"""
index.getDistance
index.knnQueryBatch
index.loadIndex
index.saveIndex
"""
def __init__(self, index=None):
"""
        # TODO: both index and id2word must be persisted, otherwise the add/create logic breaks after reloading
:param index: index.loadIndex('*')
"""
self.id2word = {}
self.word2id = {}
self.index = index
def __call__(self, *args, **kwargs):
return self.query(*args, **kwargs)
def query(self, data, k=10, num_threads=4):
_ = self.index.knnQueryBatch(data, k, num_threads)
return list(map(self._parse_result, _))
def createIndex(self, words, vectors):
"""
        :param words: must be unique (no duplicates)
:param vectors:
:return:
"""
if not self.id2word:
print('Index Create ...')
self.id2word, self.word2id = self._add(words)
self._create(vectors)
else:
print('Index Add...')
_add_ids = []
for id, word in enumerate(words):
                if word not in self.word2id:  # skip words that are already indexed
self.word2id[word] = len(self.word2id)
_add_ids.append(id)
vectors = np.array(vectors)[_add_ids]
            self._create(vectors, len(self.id2word) + np.arange(len(_add_ids)))  # incrementally update the index
self.id2word = {j: i for i, j in self.word2id.items()}
def _add(self, words):
id2word = dict(enumerate(words))
word2id = {j: i for i, j in id2word.items()}
return id2word, word2id
def _create(self, vectors, ids=None):
# initialize a new index, using a HNSW index on Cosine Similarity
self.index = nmslib.init(method='hnsw', space='cosinesimil') if not self.index else self.index
self.index.addDataPointBatch(vectors, ids)
self.index.createIndex({'post': 2})
def _parse_result(self, pair):
return OrderedDict([(self.id2word[k], 1 - v) for k, v in zip(*pair)])
if __name__ == '__main__':
vq = VecQuery()
vq.createIndex(['a', 'b'], [[1, 2], [3, 4]])
print(vq.query([[1, 2]]))
print(vq.id2word, vq.word2id)
vq.createIndex(['a', 'c'], [[1, 2], [3, 3]])
print(vq.query([[1, 2], [1, 2]]))
print(vq.id2word, vq.word2id)
```
#### File: tql/data_eda/target_distributed.py
```python
def target_distributed(df, by, target_name='label'):
_ = df.groupby(by)[target_name].value_counts(1)
return _
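if __name__ == '__main__':
    # Hypothetical usage sketch with a made-up frame: shows the label share
    # within each group (value_counts(1) returns normalized frequencies).
    import pandas as pd
    df = pd.DataFrame({'city': ['a', 'a', 'b', 'b'], 'label': [0, 1, 1, 1]})
    print(target_distributed(df, 'city'))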
```
#### File: ml/automl/AutoFeat_.py
```python
import featuretools as ft
import featuretools.variable_types as vt
from tqdm.auto import tqdm
from tql.pipe import cprint
from tql.utils.pandas_utils import reduce_mem_usage, duplicate_columns
from tql.ml.automl.primitives import *
from featuretools.selection import remove_low_information_features, remove_single_value_features, \
remove_highly_correlated_features
class AutoFeat(object):
def __init__(self, df, base_entity_id, target_entity_id, type2features, index=None, time_index=None,
secondary_time_index=None):
        self.entity_id = base_entity_id  # entity (table) name
self.type2features = self._convert_type(type2features) # vt.xx
self.index = '__id' if index is None else index
self.target_entity_id = target_entity_id # self.index != '__id' => self.target_entity_id = self.entity_id
self.es = ft.EntitySet(id='MAIN')
self.es.entity_from_dataframe(
entity_id=self.entity_id,
dataframe=df, # df.copy()
index=self.index,
variable_types=self.variable_types,
time_index=time_index,
secondary_time_index=secondary_time_index
)
self._create_es()
def _create_es(self):
"""overwrite"""
for col in tqdm(self.normalize_entity_cols, desc='Normalize Entity'):
self.es.normalize_entity(self.entity_id, col, col)
def run_dfs(
self, max_depth=1,
features_only=True,
ignore_variables=None,
reduce_mem=False,
reduce_feats=True,
trans_primitives=None,
agg_primitives=None,
chunk_size=None,
n_jobs=1,
**kwargs
):
"""Deep Feature Synthesisf
agg_primitives (list[str or AggregationPrimitive], optional): List of Aggregation
Feature types to apply.
Default: ["sum", "std", "max", "skew", "min", "mean", "count", "percent_true", "num_unique", "mode"]
DateTime: ['time_since_last', 'time_since_first', 'trend']
trans_primitives (list[str or TransformPrimitive], optional):
List of Transform Feature functions to apply.
Default: ["day", "year", "month", "weekday", "haversine", "num_words", "num_characters"]
groupby_trans_primitives (list[str or :class:`.primitives.TransformPrimitive`], optional):
list of Transform primitives to make GroupByTransformFeatures with
"""
if ignore_variables is None:
# ignore_variables = [self.target_entity_id, self.index]
# ignore_variables = ["__id"] # 忽略单值id 会少了一些count特征
ignore_variables = []
if trans_primitives is None:
trans_primitives = [
"year", "month", "day", "hour", "minute", "week", "weekday", "is_weekend",
'time_since_previous',
# diff # https://stackoverflow.com/questions/60324672/how-is-time-since-previous-computed-in-featuretools
Quarter(),
]
_ = ft.dfs(
entityset=self.es,
            target_entity=self.target_entity_id,  # must have unique ids: either the base entityset or an entity created by normalize_entity
features_only=features_only,
max_depth=max_depth,
ignore_variables={self.entity_id: ignore_variables},
chunk_size=chunk_size,
n_jobs=n_jobs,
verbose=1,
agg_primitives=agg_primitives,
trans_primitives=trans_primitives,
**kwargs
)
if features_only:
return _
else:
df_ = _[0].add_prefix(f'{self.entity_id}_').reset_index()
if reduce_feats:
cprint("remove_low_information_features")
df_ = remove_low_information_features(df_)
cprint("remove_single_value_features")
df_ = remove_single_value_features(df_, count_nan_as_value=True)
cprint("remove_duplicate_features")
dups = duplicate_columns(df_)
df_ = df_.drop(dups, 1)
if reduce_mem:
df_ = reduce_mem_usage(df_)
return df_
@property
def normalize_entity_cols(self):
types = [vt.Id, vt.Categorical, vt.Boolean]
cols = sum([self.type2features.get(type_, []) for type_ in types], [])
return [i for i in cols if i != self.index]
@property
def variable_types(self):
dic = {}
for type_, features in self.type2features.items():
dic.update(zip(features, len(features) * [type_]))
return dic
def _convert_type(self, type2features):
type2features_ = {}
for type_, features in type2features.items():
if isinstance(type_, str):
type2features_[vt.__getattribute__(type_)] = features
else:
type2features_[type_] = features
return type2features_
if __name__ == '__main__':
import pandas as pd
df = pd.DataFrame([[1, 2, 3], [2, 2, 3], [3, 2, 3]], columns=['uid', 'a', 'b'])
df['date'] = pd.date_range('2020-01-01', periods=3)
type2features = {
vt.Id: ['uid'],
vt.Categorical: ['a', 'b'],
vt.Datetime: ['date']
}
    af = AutoFeat(df, 'test', 'test', type2features, index='uid')  # base_entity is the one named base_entity_id
df_ft = af.run_dfs(3, False, trans_primitives=['cum_max'], reduce_feats=False)
print(df_ft)
```
#### File: ml/fm/fm_embedding.py
```python
import numpy as np
from lightfm import LightFM
from scipy.sparse import csr_matrix
class FMEmbedding(object):
def __init__(self, dim=128, **kwargs):
self.fm = LightFM(no_components=dim, random_state=666, **kwargs)
def fit(self, df, epochs=10, num_threads=30, verbose=True):
"""
:param df: ['uid', 'iid', 'rating']
:return:
"""
df.columns = ['uid', 'iid', 'rating']
csr_data = csr_matrix((df['rating'], (df['uid'], df['iid'])))
print(f"csr_data: {csr_data.shape}")
self.fm.fit(csr_data, epochs=epochs, num_threads=num_threads, verbose=verbose)
self.user_embeddings = np.ascontiguousarray(self.fm.user_embeddings)
self.item_embeddings = np.ascontiguousarray(self.fm.item_embeddings)
# if __name__ == '__main__':
# import faiss
# from ann import ANNFaiss
#
# ann = ANNFaiss()
#
# ann.train(item_embeddings, index_factory='IVF4000, Flat', metric=faiss.METRIC_INNER_PRODUCT, noramlize=True)
#
# ann.noramlize()
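if __name__ == '__main__':
    # Hypothetical usage sketch: uid/iid must already be contiguous integer
    # codes; the toy interactions below are made up.
    import pandas as pd
    ratings = pd.DataFrame({'uid': [0, 0, 1, 2], 'iid': [0, 1, 1, 2], 'rating': [1, 1, 1, 1]})
    fme = FMEmbedding(dim=8)
    fme.fit(ratings, epochs=2, num_threads=1)
    print(fme.user_embeddings.shape, fme.item_embeddings.shape)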
```
#### File: tql/regex/illegal_char.py
```python
import re
"""https://www.jianshu.com/p/4958bcdea12a"""
# def illegal_char(s):
# s = re.compile(r"""^[\u4e00-\u9fa5\u0041-\u005A\u0061-\u007A\u0030-\u0039!@#$%^&*()-=[]{}\\;':",./<>?/*\+]+""") \
# .sub('', s)
#
# return s
#
# if __name__ == '__main__':
# s = "《魔兽世界》官方宣布,为了维护玩家们的游戏体验," \
# "将为怀旧服高负载服务器的玩家们提供免费的角色定向转移服务," \
# "更多的详情将在晚些时候公布。官方也将在今晚18点新增两个PVP服务器阿什坎迪与怀特迈恩。 "
# print(illegal_char(s))
```
#### File: utils/config/limit_memory.py
```python
import resource
def limit_memory(memory=16):
"""
    :param memory: memory limit in GB (16 GB by default)
:return:
"""
rsrc = resource.RLIMIT_AS
# res_mem=os.environ["RESOURCE_MEM"]
memlimit = memory * 1024 ** 3
resource.setrlimit(rsrc, (memlimit, memlimit))
# soft, hard = resource.getrlimit(rsrc)
print("memory limit as: %s G" % memory)
```
#### File: utils/config/set_plot.py
```python
import matplotlib.pyplot as plt
def set_plot():
"""
    plt.rcParams['font.sans-serif'] = ['Simhei']  # render Chinese characters correctly
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['axes.unicode_minus'] = False  # show the minus sign properly
plt.rcParams["text.usetex"] = False
plt.rcParams["legend.numpoints"] = 1
plt.rcParams["figure.figsize"] = (18, 9) # (12, 6)
plt.rcParams["figure.dpi"] = 128
plt.rcParams["savefig.dpi"] = plt.rcParams["figure.dpi"]
plt.rcParams["font.size"] = 12
plt.rcParams["pdf.fonttype"] = 42
"""
    plt.rcParams['font.sans-serif'] = ['Simhei']  # render Chinese characters correctly
    plt.rcParams['font.family'] = 'sans-serif'
    plt.rcParams['axes.unicode_minus'] = False  # show the minus sign properly
plt.rcParams["text.usetex"] = False
plt.rcParams["legend.numpoints"] = 1
plt.rcParams["figure.figsize"] = (18, 9) # (12, 6)
plt.rcParams["figure.dpi"] = 128
plt.rcParams["savefig.dpi"] = plt.rcParams["figure.dpi"]
plt.rcParams["font.size"] = 12
plt.rcParams["pdf.fonttype"] = 42
print('Setting Success!')
```
#### File: utils/debug/__init__.py
```python
import pysnooper
@pysnooper.snoop()
def number_to_bits(number):
if number:
bits = []
while number:
number, remainder = divmod(number, 2)
bits.insert(0, remainder)
return bits
else:
return [0]
import numpy as np
@pysnooper.snoop()
def f():
return np.arange(100000).shape
if __name__ == '__main__':
f()
```
#### File: 0_DA/udfs/data_transform.py
```python
__title__ = 'data_transform'
__author__ = 'JieYuan'
__mtime__ = '2018/2/13'
import numpy as np
# aggregation helpers
class Agg(object):
@staticmethod
def mode(x):
"""
        x.mode() can be empty (it needs a value that repeats), so guard with try/except and fall back to the first value
"""
try:
_mode = x.mode()[0]
except:
return x.values[0]
else:
return _mode
@staticmethod
def count_zero(x):
return len(x) - np.sum(x)
@staticmethod
def count_null(x):
return x.isnull().sum()
@staticmethod
def gr_agg(df, by_name, col_name, *functions):
"""
        *functions: aggregation functions (names or callables) passed positionally after by_name and col_name
"""
gr = df.groupby(by_name)
mapper = lambda x: col_name + '_' + x if x != by_name else by_name # col_name_sum
return gr[col_name].agg(functions).reset_index().rename(columns=mapper)
@staticmethod
def nlargest(df, by_name, col_name, n=1):
"""
col_name top K
"""
return df.sort_values(col_name).groupby(by_name).tail(n)
@staticmethod
def nsmallest(df, by_name, col_name, n=1):
return df.sort_values(col_name).groupby(by_name).head(n)
@classmethod
def nmost(cls, df, by_name, col_name, n=3):
"""
        the n most frequent values of col_name within each group
"""
df = df.groupby([by_name, col_name], as_index=False)[col_name].agg({'_count': 'count'})
return df.pipe(cls.nlargest, by_name, '_count', n).drop('_count', 1).rename(
columns={col_name: col_name + '_nmost'})
@classmethod
def nleast(cls, df, by_name, col_name, n=3):
df = df.groupby([by_name, col_name], as_index=False)[col_name].agg({'_count': 'count'})
return df.pipe(cls.nsmallest, by_name, '_count', n).drop('_count', 1).rename(
columns={col_name: col_name + '_nleast'})
# data reshaping helpers
class Reshape(object):
def __init__(self):
pass
@staticmethod
def explode(df, col, pat=None, drop_col=True):
"""
:param df:
:param col: col name
:param pat: String or regular expression to split on. If None, splits on whitespace
:param drop_col: drop col is Yes or No
:return: hive explode
"""
data = df.copy()
data_temp = data[col].str.split(pat=pat, expand=True).stack().reset_index(level=1, drop=True).rename(
col + '_explode')
if drop_col:
data.drop(col, 1, inplace=True)
return data.join(data_temp)
# df.a.str.split('|', expand=True) 'a|b|c' -> 'a', 'b', 'c'
@staticmethod
def crossJoin(df1, df2):
__addCol = lambda x: x.assign(__col=1)
return __addCol(df1).merge(__addCol(df2), on='__col').drop('__col', 1)
# lag/lead
def lag(df, by_name, col_name, n=1):
df = df.copy()
df[col_name + '_' + str(n)] = df.groupby(by_name)[col_name].shift(n)
return df
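if __name__ == '__main__':
    # Hypothetical usage sketch with a made-up frame: group-wise aggregation
    # and a one-step lag feature.
    import pandas as pd
    df = pd.DataFrame({'uid': [1, 1, 2, 2], 'amt': [10, 20, 5, 7]})
    print(Agg.gr_agg(df, 'uid', 'amt', 'sum', 'mean'))  # -> uid, amt_sum, amt_mean
    print(lag(df, 'uid', 'amt', 1))                     # adds amt_1 shifted within each uid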
```
#### File: utils/pandas_utils/__init__.py
```python
import numpy as np
import pandas as pd
from tqdm.auto import tqdm
def duplicate_columns(frame):
"""keep='first'
https://stackoverflow.com/questions/14984119/python-pandas-remove-duplicate-columns/32961145#32961145
    For large frames:
        dups = duplicate_columns(df)
        df.drop(dups, 1)
    For small frames:
        df.T.drop_duplicates().T
"""
    frame = frame.fillna(-123456)  # fill NaNs so missing values compare equal
groups = frame.columns.to_series().groupby(frame.dtypes).groups
dups = []
for t, v in groups.items():
dcols = frame[v].to_dict(orient="list")
vs = list(dcols.values())
ks = list(dcols.keys())
lvs = len(vs)
for i in range(lvs):
for j in range(i + 1, lvs):
if vs[i] == vs[j]:
dups.append(ks[j]) # keep='first'
break
return dups
# reduce memory
def reduce_mem_usage(df):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
start_mem = df.memory_usage().sum() / 1024 ** 2
for col in tqdm(df.columns, desc="Reduce memory"):
col_type = df[col].dtypes
if col_type in numerics:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
# else:
# df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
if __name__ == '__main__':
df = pd.DataFrame([[1, 2, 3] * 10000, [2, 2, 3] * 10000, [3, 2, 3] * 10000])
import time
s = time.time()
reduce_mem_usage(df) # 34
print(time.time() - s)
```
#### File: utils/pandas_utils/split2cols.py
```python
import pandas as pd
def split2cols(series: pd.Series, columns=None):
    return pd.DataFrame(series.tolist(), columns=columns)
if __name__ == '__main__':
    s = pd.Series([[1, 2, 3], [4, 5, 6]])
    print(split2cols(s))
```
#### File: utils/string/json2cls.py
```python
class CreateOrder(object):
def __init__(self):
self.success = None
self.item = None
self.a = None
co = CreateOrder()
co.__dict__ = {'success': True,
'item': {'id': '1f4652c2f841f94f77b29a7836ffb971',
'title': '“乱港分子”黄之锋抵台 将拜会民进党',
'url': 'https://mb.yidianzixun.com/article/0N7BdC2A?s=mb&appid=mibrowser&miref=newsin_push_model',
'category': ['时政'],
'publishTime': '2019-09-03 11:03:02',
'keywords': ['时政', '香港', '台湾'],
'source': '快讯',
'valid': True,
'userTags': ['时政', '香港', '台湾'],
'crawlTime': 1567479784181,
'cpApi': 'cn-browser-push',
'isReservedDoc': True,
'updateTime': 1567479784181,
'bizIndexName': 'BROWSER_NEWS'}}
print(co.success)
print(type(co.item['keywords']))
```
#### File: utils/string/replace.py
```python
def replace(s, dic):
return s.translate(str.maketrans(dic))
if __name__ == '__main__':
print(replace('abcd', {'a': '8', 'd': '88'}))
``` |
{
"source": "Jie-Yuan/aizoo",
"score": 3
} |
#### File: aizoo/tab/feature_selector.py
```python
import scipy as sp
from functools import partial
from sklearn.utils import shuffle, check_random_state
from sklearn.preprocessing import scale as zscore
from sklearn.model_selection import train_test_split
# ME
from meutils.pipe import *
from aizoo.utils.model_utils import get_imp
from aizoo.utils.check_utils import check_classification
class SimpleFS(object):
"""粗筛
高缺失率:
低方差(高度重复值):0.5%~99.5%分位数内方差为0的初筛
高相关:特别高的初筛,根据重要性细筛 TODO
低重要性:
召回高IV:
"""
def __init__(self, df: pd.DataFrame, exclude=None):
self.to_drop_dict = {}
if exclude is None:
exclude = []
assert isinstance(exclude, list)
assert isinstance(df, pd.DataFrame)
self.df = df
exclude += df.select_dtypes(['datetime64[ns]', object]).columns.tolist()
print(f"Exclude Fetures: {exclude}")
if exclude:
self.feats = df.columns.difference(exclude).tolist()
else:
self.feats = df.columns.tolist()
def run(self):
df = self.df.copy()
        with timer('drop high-missing features'):
            self.to_drop_dict['filter_missing'] = self.filter_missing()
        with timer('drop low-variance features'):
            self.to_drop_dict['filter_variance'] = self.filter_variance()
        with timer('drop highly-correlated features'):
            pass
return df
def filter_missing(self, feats=None, threshold=0.95):
"""
        :param feats: columns to check (defaults to self.feats)
        :param threshold: drop a feature when its missing ratio exceeds this value
        :param as_na: e.g. treat -99 as a missing value
:return:
"""
if feats is None:
feats = self.feats
to_drop = self.df[feats].isna().mean()[lambda x: x > threshold].index.tolist()
print('%d features with greater than %0.2f missing values.' % (len(to_drop), threshold))
return to_drop
def _filter_variance(self, feat, df):
var = df[feat][lambda x: x.between(x.quantile(0.005), x.quantile(0.995))].var()
return '' if var else feat
def filter_variance(self, feats=None, max_worker=4):
if feats is None:
feats = self.feats
_filter_variance = partial(self._filter_variance, df=self.df)
with ProcessPoolExecutor(min(max_worker, len(feats))) as pool:
to_drop = pool.map(_filter_variance, tqdm(feats, 'Filter Variance ...'))
to_drop = [feat for feat in to_drop if feat]
print('%d features with 0 variance in 0.5 ~ 99.5 quantile.' % len(to_drop))
return to_drop
class FS(object):
def __init__(self, estimator, verbose=0,
importance_type='split', importance_normlize=True,
percent=95, alpha=0.05, two_step=True,
):
self.estimator = estimator
self.estimator_name = str(estimator).lower()
self.verbose = verbose
self.importance_type = importance_type # split/shap/permutaion
self.importance_type_normlize = importance_normlize
self.alpha = alpha
self.two_step = two_step
self.percent = percent * 100 if 0 <= percent <= 1 else percent
def fit(self, X, y, n_trials=10, sample_weight=None):
# setup variables for Boruta
n_sample, n_feat = X.shape
# holds the decision about each feature:
# 0 - default state = tentative in original code
# 1 - accepted in original code
# -1 - rejected in original code
dec_reg = np.zeros(n_feat)
# counts how many times a given feature was more important than the best of the shadow features
hit_reg = np.zeros(n_feat)
# these record the history of the iterations
        imp_history = np.zeros((n_trials, n_feat))  # importance history of the real features, used for the final ranking
        imp_history[:] = np.nan
        # maximum shadow-feature importance observed in each trial
        sha_max_history = []
# main feature selection loop
for trial in tqdm(range(n_trials), desc='🔥'):
            if not np.any(dec_reg == 0):  # stop once every feature has been accepted or rejected
                break
            # make sure we start with a new tree in each iteration
            seed = np.random.randint(0, 10 ** 6)
            # get importances (split / shap / permutation) of real and shadow features from the model
imp_real, imp_sha = self._add_shadows_get_imps(X, y, dec_reg, sample_weight, seed=seed + 2)
imp_history[trial] = imp_real # record importance history
# get the threshold of shadow importances we will use for rejection
imp_sha_max = np.percentile(imp_sha, self.percent)
sha_max_history.append(imp_sha_max)
# register which feature is more imp than the max of shadows
hits = np.where(imp_real > imp_sha_max)[0]
hit_reg[hits] += 1
# based on hit_reg we check if a feature is doing better than expected by chance
dec_reg = self._do_tests(dec_reg, hit_reg, trial + 1)
# we automatically apply R package's rough fix for tentative ones
confirmed = np.where(dec_reg == 1)[0]
tentative = np.where(dec_reg == 0)[0]
# which tentative to keep
tentative_median = np.median(imp_history[:, tentative], 0)
tentative_confirmed = np.where(tentative_median > np.median(sha_max_history))[0]
tentative = tentative[tentative_confirmed]
# basic result variables
self.n_features_ = confirmed.shape[0]
        self.support_ = np.zeros(n_feat, dtype=bool)
        self.support_[confirmed] = 1
        self.support_weak_ = np.zeros(n_feat, dtype=bool)
        self.support_weak_[tentative] = 1
        # ranking, confirmed variables are rank 1
        self.ranking_ = np.ones(n_feat, dtype=int)
        # tentative variables are rank 2
        self.ranking_[tentative] = 2
# selected = confirmed and tentative
selected = np.hstack((confirmed, tentative))
# all rejected features are sorted by importance history
not_selected = np.setdiff1d(np.arange(n_feat), selected)
# large importance values should rank higher = lower ranks -> *(-1)
imp_history_rejected = imp_history[:, not_selected] * -1
# update rank for not_selected features
if not_selected.shape[0] > 0:
# calculate ranks in each iteration, then median of ranks across feats
iter_ranks = self._nanrankdata(imp_history_rejected, axis=1)
rank_medians = np.nanmedian(iter_ranks, axis=0)
ranks = self._nanrankdata(rank_medians, axis=0)
# set smallest rank to 3 if there are tentative feats
if tentative.shape[0] > 0:
ranks = ranks - np.min(ranks) + 3
else:
# and 2 otherwise
ranks = ranks - np.min(ranks) + 2
self.ranking_[not_selected] = ranks
else:
# all are selected, thus we set feature supports to True
            self.support_ = np.ones(n_feat, dtype=bool)
self.importance_type_history_ = imp_history
self.shadows_max_importance_history = sha_max_history
def _estimator_fit(self, X_train, X_test, y_train, y_test, train_w=None, test_w=None, seed=0):
# todo: oof estimator
# check estimator_name
        self.estimator.set_params(random_state=seed)  # optional either way, since the train/test split below is already randomized
if 'lgb' in self.estimator_name:
self.estimator.fit(X_train, y_train,
sample_weight=train_w,
eval_sample_weight=test_w,
eval_set=[(X_train, y_train), (X_test, y_test)],
eval_metric=None,
eval_names=('Train', 'Valid'),
verbose=self.verbose,
early_stopping_rounds=100)
else:
            raise ValueError('Only LightGBM estimators are supported by default; other models are TODO')
def _add_shadows_get_imps(self, X, y, dec_reg, sample_weight=None, seed=0):
# find features that are tentative still
x_cur_ind = np.where(dec_reg >= 0)[0]
x_cur = X[:, x_cur_ind]
x_cur_w = x_cur.shape[1]
# deep copy the matrix for the shadow matrix
x_sha = x_cur.copy()
        # make sure there are at least 5 columns in the shadow matrix
        while (x_sha.shape[1] < 5):
            x_sha = np.c_[x_sha, x_sha]
        # shuffle the values within each column independently
        x_sha = np.apply_along_axis(check_random_state(seed).permutation, 0, x_sha)
# get importance of the merged matrix
imp = self._feature_importances(np.c_[x_cur, x_sha], y, sample_weight, seed + 1) # (x_cur, x_sha)
# separate importances of real and shadow features
imp_sha = imp[x_cur_w:]
imp_real = np.zeros(X.shape[1])
imp_real[:] = np.nan
imp_real[x_cur_ind] = imp[:x_cur_w]
return imp_real, imp_sha
def _feature_importances(self, X, y, sample_weight=None, seed=0):
arrays = [X, y] if sample_weight is None else [X, y, sample_weight]
args = train_test_split(*arrays, random_state=seed, stratify=y if check_classification(y) else None)
self._estimator_fit(*args, seed=seed + 1) # estimator fitted
imp = get_imp(self.estimator, X, self.importance_type)
return zscore(imp) if self.importance_type_normlize else imp
def _nanrankdata(self, X, axis=1):
"""
Replaces bottleneck's nanrankdata with scipy and numpy alternative.
"""
ranks = sp.stats.mstats.rankdata(X, axis=axis)
ranks[np.isnan(X)] = np.nan
return ranks
def _do_tests(self, dec_reg, hit_reg, _iter):
active_features = np.where(dec_reg >= 0)[0]
hits = hit_reg[active_features]
# get uncorrected p values based on hit_reg
to_accept_ps = sp.stats.binom.sf(hits - 1, _iter, .5).flatten()
to_reject_ps = sp.stats.binom.cdf(hits, _iter, .5).flatten()
if self.two_step:
# two step multicor process
# first we correct for testing several features in each round using FDR
to_accept = self._fdrcorrection(to_accept_ps, alpha=self.alpha)[0]
to_reject = self._fdrcorrection(to_reject_ps, alpha=self.alpha)[0]
# second we correct for testing the same feature over and over again
# using bonferroni
to_accept2 = to_accept_ps <= self.alpha / float(_iter)
to_reject2 = to_reject_ps <= self.alpha / float(_iter)
# combine the two multi corrections, and get indexes
to_accept *= to_accept2
to_reject *= to_reject2
else:
# as in th original Boruta, we simply do bonferroni correction
# with the total n_feat in each iteration
to_accept = to_accept_ps <= self.alpha / float(len(dec_reg))
to_reject = to_reject_ps <= self.alpha / float(len(dec_reg))
# find features which are 0 and have been rejected or accepted
to_accept = np.where((dec_reg[active_features] == 0) * to_accept)[0]
to_reject = np.where((dec_reg[active_features] == 0) * to_reject)[0]
# updating dec_reg
dec_reg[active_features[to_accept]] = 1
dec_reg[active_features[to_reject]] = -1
return dec_reg
def _fdrcorrection(self, pvals, alpha=0.05):
"""
Benjamini/Hochberg p-value correction for false discovery rate, from
statsmodels package. Included here for decoupling dependency on statsmodels.
Parameters
----------
pvals : array_like
set of p-values of the individual tests.
alpha : float
error rate
Returns
-------
rejected : array, bool
True if a hypothesis is rejected, False if not
pvalue-corrected : array
pvalues adjusted for multiple hypothesis testing to limit FDR
"""
pvals = np.asarray(pvals)
pvals_sortind = np.argsort(pvals)
pvals_sorted = np.take(pvals, pvals_sortind)
nobs = len(pvals_sorted)
ecdffactor = np.arange(1, nobs + 1) / float(nobs)
reject = pvals_sorted <= ecdffactor * alpha
if reject.any():
rejectmax = max(np.nonzero(reject)[0])
reject[:rejectmax] = True
pvals_corrected_raw = pvals_sorted / ecdffactor
pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]
pvals_corrected[pvals_corrected > 1] = 1
# reorder p-values and rejection mask to original order of pvals
pvals_corrected_ = np.empty_like(pvals_corrected)
pvals_corrected_[pvals_sortind] = pvals_corrected
reject_ = np.empty_like(reject)
reject_[pvals_sortind] = reject
return reject_, pvals_corrected_
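if __name__ == '__main__':
    # Hypothetical usage sketch: Boruta-style selection with a LightGBM
    # estimator; X must be a numpy array here, and the fit kwargs used in
    # _estimator_fit assume a compatible lightgbm version.
    from lightgbm import LGBMClassifier
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=300, n_features=10, random_state=0)
    fs = FS(LGBMClassifier(n_estimators=200))
    fs.fit(X, y, n_trials=5)
    print(fs.support_)   # confirmed features
    print(fs.ranking_)   # 1 = confirmed, 2 = tentative, >2 = rejected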
```
#### File: aizoo/tuner/optimizers.py
```python
from meutils.pipe import *
from sklearn.metrics import *
from aizoo.tuner.base import optuna, Tuner
from aizoo.tab.models import LGBMOOF
from aizoo.utils.check_utils import check_classification
class F1Optimizer(Tuner):
def __init__(self, search_space, y, y_pred, **kwargs):
super().__init__(search_space, **kwargs)
self.y = y
self.y_pred = y_pred
def objective(self, trial: optuna.trial.Trial):
params = self.trial_choice(trial)
y_pred_ = np.where(np.array(self.y_pred) > params['threshold'], 1, 0)
return f1_score(self.y, y_pred_)
class LGBOptimizer(Tuner):
def __init__(self, search_space, X, y, feval, fit_params=None, oof_fit_params=None, **kwargs):
"""
@param search_space:
@param X:
@param y:
@param feval:
        @param fit_params: kwargs passed to the underlying native fit()
        @param oof_fit_params: dict(sample_weight=None, X_test=None, feval=None, cv=5, split_seed=777, target_index=None)
@param kwargs:
"""
super().__init__(search_space, **kwargs)
self.X = X
self.y = y
self.feval = feval
self.fit_params = fit_params
self.oof_fit_params = oof_fit_params if oof_fit_params is not None else {}
def objective(self, trial: optuna.trial.Trial):
params = self.trial_choice(trial)
task = 'Classifier' if check_classification(self.y) else 'Regressor'
_ = (
LGBMOOF(params=params, fit_params=self.fit_params, task=task)
.fit(self.X, self.y, feval=self.feval, **self.oof_fit_params)
)
if _ is None:
raise ValueError("Target is None⚠️")
return _
if __name__ == '__main__':
y = [1, 1, 0, 0]
y_pred = [0.1, 0.2, 0.3, 0.4]
# opt = F1Optimizer({'threshold': 0.1}, y, y_pred)
opt = F1Optimizer("./search_space/f1.yaml", y, y_pred)
opt.optimize(
100,
direction='minimize',
study_name='test',
storage="sqlite:////Users/yuanjie/Desktop/Projects/Python/aizoo/aizoo/tuner/test.db",
load_if_exists=True # cli --skip-if-exists
)
# optuna-dashboard sqlite:////Users/yuanjie/Desktop/Projects/Python/aizoo/aizoo/tuner/test.db
print(opt.trials_dataframe)
``` |
{
"source": "Jie-Yuan/autokeras",
"score": 2
} |
#### File: autokeras/autokeras/layer_utils.py
```python
import tensorflow as tf
from sklearn import model_selection
from tensorflow.python.util import nest
from autokeras import const
def get_global_average_pooling_layer(shape):
return [tf.keras.layers.GlobalAveragePooling1D,
tf.keras.layers.GlobalAveragePooling2D,
tf.keras.layers.GlobalAveragePooling3D][len(shape) - 3]
def get_global_max_pooling_layer(shape):
return [tf.keras.layers.GlobalMaxPool1D,
tf.keras.layers.GlobalMaxPool2D,
tf.keras.layers.GlobalMaxPool3D][len(shape) - 3]
def format_inputs(inputs, name=None, num=None):
inputs = nest.flatten(inputs)
if not isinstance(inputs, list):
inputs = [inputs]
if num is None:
return inputs
if not len(inputs) == num:
raise ValueError('Expected {num} elements in the '
'inputs list for {name} '
'but received {len} inputs.'.format(num=num,
name=name,
len=len(inputs)))
return inputs
def split_train_to_valid(x, y):
# Generate split index
validation_set_size = int(len(x[0]) * const.Constant.VALIDATION_SET_SIZE)
validation_set_size = min(validation_set_size, 500)
validation_set_size = max(validation_set_size, 1)
train_index, valid_index = model_selection.train_test_split(
range(len(x[0])),
test_size=validation_set_size,
random_state=const.Constant.SEED)
# Split the data
x_train = []
y_train = []
x_val = []
y_val = []
for temp_x in x:
x_train.append(temp_x[train_index])
x_val.append(temp_x[valid_index])
for temp_y in y:
y_train.append(temp_y[train_index])
y_val.append(temp_y[valid_index])
return (x_train, y_train), (x_val, y_val)
def get_name_scope():
with tf.name_scope('a') as scope:
name_scope = scope[:-2]
return name_scope
``` |
{
"source": "Jie-Yuan/BaiduAI-sdk",
"score": 3
} |
#### File: Jie-Yuan/BaiduAI-sdk/kg.py
```python
import json
import urllib.request
import requests
class BaiDuNLP(object):
def __init__(self, api_key='<KEY>', secret_key='<KEY>'):
self.access_token = None
self._get_access_token(api_key, secret_key)
def predict(self, text, api_url):
"""
api_url = 'https://aip.baidubce.com/rpc/2.0/kg/v1/cognitive/entity_annotation?charset=UTF-8&access_token='
api = BaiDuNLP()
api.predict('周杰伦', api_url)
"""
url = api_url + self.access_token
# the input is json format
input_text = {'data': text}
input_text = json.dumps(input_text)
r = requests.post(url, data=input_text, headers={'Content-Type': 'application/json'})
return r.json()
def _get_access_token(self, api_key, secret_key):
host = f'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={api_key}&client_secret={secret_key}'
request = urllib.request.Request(host)
request.add_header('Content-Type', 'application/json; charset=UTF-8')
response = urllib.request.urlopen(request)
content = response.read()
self.access_token = json.loads(content)['access_token']
``` |
{
"source": "jieyuan-bi/finduofagroup",
"score": 2
} |
#### File: findclassmate/users/models.py
```python
from django.db import models
from courses.models import ClassModel
from django.utils import timezone
from PIL import Image
baseurl = 'http://192.168.0.209:8000'
class UserModel(models.Model):
name = models.CharField(max_length=255, unique=True)
password = models.CharField(max_length=255)
gender = models.CharField(max_length=255)
classes = models.ManyToManyField(ClassModel) #the class joined
role = models.IntegerField(default=0) #guest-0, user-1, admin-2
create_time = models.DateTimeField(default=timezone.now)
phone = models.CharField(max_length=255)
wechat = models.CharField(max_length=255)
email = models.EmailField()
major = models.CharField(max_length=255)
onecard = models.ImageField(upload_to='uploads/')
class Meta:
ordering = ('name',)
db_table = 'users_user'
def get_onecard(self):
if self.onecard:
return baseurl + self.onecard.url
return ''
```
#### File: findclassmate/users/utils.py
```python
import logging
from django.db.models import Q
from .models import *
from passlib.context import CryptContext
# Get an instance of a logger
logger = logging.getLogger(__name__)
#encrypt the password
def encryptPassword(password):
pwd_context = CryptContext(
schemes=["pbkdf2_sha256"],
default="<PASSWORD>",
pbkdf2_sha256__default_rounds=30000
)
return pwd_context.encrypt(password)
#check if the password match the hashed one
#@return
def checkPassword(account, password):
role = UserModel.objects.get(name=account).role
pwd_context = CryptContext(
schemes=["pbkdf2_sha256"],
default="pbkdf2_sha256",
pbkdf2_sha256__default_rounds=30000
)
hashed = UserModel.objects.get(name=account).password
# logger.info('[Users] check_password' +hashed )
validity = pwd_context.verify(password, hashed)
return validity, role
#create super super user
def createAdmin():
name='jieyuan'
password='<PASSWORD>'
password = encryptPassword(password)
UserModel.objects.create(name=name,password=password,role=2)
return
#search user with query
#@return serialized users
def searchUser(query):
users = UserModel.objects.filter(Q(name__icontains=query))
result = []
for user in users:
data = {'name':user.name,'gender':user.gender,'phone':user.phone,'wechat':user.wechat,'email':user.email,'onecard':user.get_onecard(),
'role':user.role,'create_time':user.create_time, 'major':user.major}
result.append(data)
return result
```
#### File: findclassmate/users/views.py
```python
from re import A
from django.shortcuts import render
from django.template import response
from django.db import IntegrityError
from rest_framework.views import APIView
from rest_framework.response import Response
from .utils import *
from .models import *
import logging
import os
from django.conf import settings
from django.core.files import File
# Get an instance of a logger
logger = logging.getLogger(__name__)
# returns all the catalogues and subjects
class SignupView(APIView):
# permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
name = request.data.get('name')
email = request.data.get('email')
phone = request.data.get('phone')
password = request.data.get('password')
wechat = request.data.get('wechat')
major = request.data.get('major')
gender = request.data.get('gender')
onecard = request.FILES['onecard']
response={'success':'1'}
        safe_password = encryptPassword(password)
        logger.info('onecard: %s', onecard)
image_name = onecard.name
image_path = os.path.join(settings.MEDIA_ROOT, image_name)
#create a dir when not exist
try:
os.mkdir(settings.MEDIA_ROOT)
except Exception:
pass
#save file to local
f = open(image_path,'wb')
for i in onecard.chunks():
f.write(i)
f.close()
# try to create a new registered user
try:
UserModel.objects.create(name=name, gender=gender, email=email, major=major, phone=phone, wechat=wechat,
                                     password=safe_password, onecard=image_name)
# if the username already exist, give a flag
except IntegrityError as e:
response['success']='-1'
except Exception as e:
# logger.info("[Users] Fail to create user!\n", e)
response['success']='0'
return Response(response)
class LoginView(APIView):
# permission_classes = (IsAuthenticated,)
def post(self, request, *args, **kwargs):
account = request.data.get('account')
password = request.data.get('password')
response={'success':1}
#create admin_account,super super user
# try:
# createAdmin()
# except Exception:
# pass
try:
check_password, role = checkPassword(account, password)
except Exception:
response['success']=0
return Response(response)
# logger.info(check_password )
if not check_password:
response['success']=0
response['role']=role
return Response(response)
class getUsersView(APIView):
def get(self, request, *args, **kwargs):
result = UserModel.objects.all()
response={}
response['users']=[]
for i in result:
user = {'name':i.name,'gender':i.gender,'phone':i.phone,'wechat':i.wechat,'email':i.email,'onecard':i.get_onecard(),
'role':i.role,'create_time':i.create_time, 'major':i.major}
response['users'].append(user)
return Response(response)
class adminSearchView(APIView):
def get(self, request, *args, **kwargs):
query = request.GET.get('query')
        logger.info('query: %s', query)
users = searchUser(query)
response={}
response['users']=users
return Response(response)
class confirmOnecardView(APIView):
def post(self, request, *args, **kwargs):
name = request.data.get('name')
response={}
response['success']=0
#supersuper user cannot change
# logger.info('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!name:',name)
if name=='jieyuan':
return Response(response)
try:
user = UserModel.objects.get(name=name)
user.role=1
user.save()
response['success']=1
except Exception as e:
pass
return Response(response)
``` |
{
"source": "Jie-Yuan/ChatGo",
"score": 3
} |
#### File: chatgo/utils/topnews.py
```python
import pandas as pd
def get(idx, url='http://web.algo.browser.miui.srv/data/feed/recall?q=topnews'):
_ = pd.read_html(url, encoding='utf-8')[0][1].tolist()[:idx]
return ':\n' + '\n\n'.join(_)
``` |
{
"source": "Jie-Yuan/CTRZOO",
"score": 3
} |
#### File: ctrzoo/dateset/Dataset.py
```python
import numpy as np
import pandas as pd
import tensorflow as tf
from functools import partial
class Dataset(object):
"""
https://tensorflow.google.cn/guide/data?hl=zh_cn
"""
def __init__(self, batchsize=128, cache_filename=""):
self.batchsize = batchsize
self.cache_filename = cache_filename
def from_cache(self, inputs, outputs=None, is_test=False, shuffle_buffer_size=10000, shuffle_seed=None):
"""
多输入多输出inputs/outputs对应元组
"""
if isinstance(inputs, tuple): # Multi-inputs
pass
elif isinstance(inputs, (list, np.ndarray, dict, pd.DataFrame)):
if isinstance(inputs, pd.DataFrame):
inputs = inputs.to_dict('list')
else:
raise ValueError("`inputs` Data Type Error")
if outputs is None:
tensors = (inputs,)
else:
tensors = (inputs, outputs)
# Common
ds = tf.data.Dataset.from_tensor_slices(tensors)
        if outputs is None or is_test:  # never shuffle the test set
pass
else:
ds = ds.shuffle(shuffle_buffer_size, seed=shuffle_seed)
ds = ds.batch(self.batchsize)
        ds = ds.prefetch(tf.data.experimental.AUTOTUNE)  # todo: consider .repeat(), e.g. shuffle within each epoch, batch by 32 and repeat 10 times
return ds
def from_generator(self):
        # TODO: support building datasets from files (txt / tfrecord)
# tf.data.Dataset.from_generator()
pass
def _from_np_array(self, array):
# Common
buffer_size = len(array)
ds = tf.data.Dataset.from_tensor_slices(array)
ds = ds.shuffle(buffer_size, seed=None)
ds = ds.batch(self.batchsize)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
return ds
def _from_pd_dataframe(self, df: pd.DataFrame, label="label"):
"""
import tensorflow as tf
dataset = tf.data.Dataset.from_tensor_slices(({"a": [1, 2], "b": [3, 4]}, [0, 1]))
print(list(dataset.as_numpy_iterator()))
:param df:
:param label:
:return:
"""
        if label and label in df.columns:
            labels = df[label]
            df = df.drop(labels=[label], axis=1)
tensors = (df.to_dict('list'), labels) # df.to_dict('series')
else:
tensors = df.to_dict('list')
# Common
buffer_size = len(df)
ds = tf.data.Dataset.from_tensor_slices(tensors)
ds = ds.shuffle(buffer_size, seed=None)
ds = ds.batch(self.batchsize)
ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
# features_dataset = tf.data.Dataset.from_tensor_slices(features)
# labels_dataset = tf.data.Dataset.from_tensor_slices(labels)
# tf.data.Dataset.zip((features_dataset, labels_dataset))
return ds
def from_tfrecord(self,
feature_dict: dict,
file_pattern: str = None,
label='label',
file_shuffle=True,
file_shuffle_seed=666,
shuffle_buffer_size=0):
"""注意缺失值问题
ds = Dataset()
feature_dict = {
'id': tf.io.FixedLenFeature((), tf.int64, default_value=0),
'feature': tf.io.FixedLenFeature((), tf.int64, default_value=0) # default_value=tf.zeros([], dtype=tf.float32)
}
ds = ds.from_tfrecord(feature_dict, '/Users/yuanjie/Desktop/Projects/Spark/MIPush/test-output.tfrecord/part*')
:param feature_dict:
:param file_pattern:
:param shuffle:
:param seed:
:param shuffle_buffer_size:
:return:
"""
# TODO: cache
# assert isinstance(filename, str), f"file path error: {filename}"
#
# if Path(filename).is_dir():
# filename = list(map(str, Path(filename).glob(glob_regex))) # tf.data.Dataset.list_files
# parser_fn = partial(tf.io.parse_single_example, features=feature_dict)
parser_fn = partial(self._tfrecord_parser_fn, features=feature_dict, label=label)
filenames = tf.data.Dataset.list_files(file_pattern, file_shuffle, file_shuffle_seed)
# ds
ds = tf.data.TFRecordDataset(filenames, num_parallel_reads=tf.data.experimental.AUTOTUNE)
ds = ds.map(parser_fn)
if shuffle_buffer_size > 0:
ds = ds.shuffle(shuffle_buffer_size, seed=None, reshuffle_each_iteration=True)
ds = ds.batch(self.batchsize)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE) # 若内存泄露,需手动指定
return ds
def _tfrecord_parser_fn(self, example_photo, features, label='label'):
_ = tf.io.parse_single_example(example_photo, features=features)
return _, _.pop(label) # X, y
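
if __name__ == '__main__':
    # Minimal usage sketch: build a shuffled, batched pipeline from in-memory numpy arrays.
    # The array shapes and batch size below are illustrative only.
    X = np.random.random((100, 8)).astype('float32')
    y = np.random.randint(0, 2, size=100)
    ds = Dataset(batchsize=32).from_cache(X, y)
    for batch_x, batch_y in ds.take(1):
        print(batch_x.shape, batch_y.shape)  # (32, 8) (32,)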
```
#### File: features/transformer/smooth_utils.py
```python
import numpy as np
def walson_ctr(num_click, num_pv, z=1.96):
""":arg
威尔逊
https://mp.weixin.qq.com/s/rLP1wsS0a71q5RA7NQcjdQ
"""
p = num_click / num_pv
if p > 0.9:
return 0.0
n = num_pv
A = p + z ** 2 / (2 * n)
B = np.sqrt(p * (1 - p) / n + z ** 2 / (4 * (n ** 2)))
C = z * B
D = 1 + z ** 2 / n
ctr = (A - C) / D
return ctr
``` |
{
"source": "jieyuanCUHK/DeeperCount",
"score": 3
} |
#### File: DeeperCount/02_source_code_for_training/4_get_ready_for_training.py
```python
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import numpy as np
import os
import glob
import cv2
import sys
#from libtiff import TIFF
class myAugmentation(object):
def __init__(self, aug_merge_path="./03_image_directory/After_augmentation_primary", aug_train_path="./03_image_directory/Final_train", aug_label_path="./03_image_directory/Final_label", img_type="tif"):
self.img_type = img_type
self.aug_merge_path = aug_merge_path
self.aug_train_path = aug_train_path
self.aug_label_path = aug_label_path
self.aug_imgs=glob.glob(aug_merge_path+"/*."+img_type)
self.slices = len(self.aug_imgs)
def splitMerge(self):
"""
split merged image (after image augmentation) apart
"""
path_merge = self.aug_merge_path
path_train = self.aug_train_path
path_label = self.aug_label_path
path = path_merge + "/"
train_imgs = glob.glob(path+"/*."+self.img_type)
savedir = path_train + "/"
if not os.path.lexists(savedir):
os.mkdir(savedir)
savedir = path_label + "/"
if not os.path.lexists(savedir):
os.mkdir(savedir)
count=0
for imgname in train_imgs:
midname = imgname[imgname.rindex("/")+1:imgname.rindex("."+self.img_type)]
img = cv2.imread(imgname)
img_train = img[:,:,2]  # cv2 loads images as BGR, so channel 2 is the red (train) channel
img_label = img[:,:,0]
cv2.imwrite(path_train+"/"+midname+"_train"+"."+self.img_type,img_train)
cv2.imwrite(path_label+"/"+midname+"_label"+"."+self.img_type,img_label)
count=count+1
if count==len(train_imgs):
break
class dataProcess(object):
def __init__(self, out_rows, out_cols, data_path, label_path, npy_path = "./", img_type = "tif"):
self.out_rows = out_rows
self.out_cols = out_cols
self.data_path = data_path
self.label_path = label_path
self.img_type = img_type
self.npy_path = npy_path
def create_train_data_aug(self):
i = 0
print('Creating training images')
print('-'*30)
imgs = glob.glob(self.data_path+"/*."+self.img_type)
corr_labels= glob.glob(self.label_path+"/*."+self.img_type)
imgs.sort()
corr_labels.sort()
imgdatas = np.ndarray((len(imgs),self.out_rows,self.out_cols,1), dtype=np.uint8)
imglabels = np.ndarray((len(imgs),self.out_rows,self.out_cols,1), dtype=np.uint8)
for itering in zip(imgs,corr_labels):
imgname=itering[0]
labelname=itering[1]
midname = imgname[imgname.rindex("/")+1:]
midname_label= labelname[labelname.rindex("/")+1:]
img = load_img(self.data_path + "/" + midname,grayscale = True)
label = load_img(self.label_path + "/" + midname_label,grayscale = True)
img = img_to_array(img)
label = img_to_array(label)
#img = cv2.imread(self.data_path + "/" + midname,cv2.IMREAD_GRAYSCALE)
#label = cv2.imread(self.label_path + "/" + midname,cv2.IMREAD_GRAYSCALE)
#img = np.array([img])
#label = np.array([label])
imgdatas[i] = img
imglabels[i] = label
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, len(imgs)))
i += 1
print('Creation finished')
print('-'*30)
np.save(self.npy_path + '/aug_imgs_train.npy', imgdatas)
np.save(self.npy_path + '/aug_imgs_mask_train.npy', imglabels)
print('Saving to .npy files completed')
if __name__ == "__main__":
au=myAugmentation()
au.splitMerge()
mydata_t= dataProcess(512,512,data_path="./03_image_directory/Final_train",label_path="./03_image_directory/Final_label",npy_path="./03_image_directory/")
mydata_t.create_train_data_aug()
``` |
{
"source": "Jie-Yuan/Deeps",
"score": 2
} |
#### File: serving/onnx/inference.py
```python
import numpy as np
import onnxruntime as rt
class Inference(object):
def __init__(self, path="./iris.onnx"):
self.sess = rt.InferenceSession(path)
self._describe()
def run(self, X):
_ = self.sess.get_inputs()[0]
self.input_name = _.name
# self.input_shape = tuple(_.shape)
# assert X.shape == self.input_shape
if not isinstance(X[0][0], np.float32):
X = X.astype(np.float32)
return self.sess.run(None, {self.input_name: X}) # 概率输出 or 类别输出
def _describe(self):
for attr_ in ['get_inputs', 'get_outputs']:
_puts = self.sess.__getattribute__(attr_)()
for i, _put in enumerate(_puts, 1):
print({attr_.split('_')[-1][:-1].title() + str(i): (_put.type, _put.name, _put.shape)})
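
# Minimal usage sketch (assumes an exported "iris.onnx" model whose input is a float32 matrix
# with 4 feature columns; the file name and shape are illustrative):
# clf = Inference("./iris.onnx")
# print(clf.run(np.random.random((2, 4))))  # list of output arrays (labels and/or probabilities)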
```
#### File: inn/layers/_Prediction.py
```python
import tensorflow as tf
from tensorflow.keras.initializers import Zeros
class Prediction(tf.keras.layers.Layer):
def __init__(self, task='binary', use_bias=False, num_class=0, name='Prediction', **kwargs):
"""https://github.com/shenweichen/DeepCTR/issues/211
:param task: ["binary", "multiclass", "regression"]
:param use_bias:
:param kwargs:
"""
super().__init__(name=name, **kwargs)
if task not in ["binary", "multiclass", "regression"]:
raise ValueError("task must be binary, multiclass or regression")
self.task = task
self.use_bias = use_bias
self.num_class = num_class
def build(self, input_shape):
super().build(input_shape)
if self.use_bias:
self.global_bias = self.add_weight(
shape=(self.num_class,) if self.task == "multiclass" else (1,),
initializer=Zeros(), name="global_bias")
def call(self, inputs, **kwargs):
x = inputs
if self.use_bias:
x = tf.nn.bias_add(x, self.global_bias, data_format='NHWC')
if self.task == "binary":
x = tf.sigmoid(x) # [[pred1], [pred2]]
output = tf.reshape(x, (-1, 1))
elif self.task == "multiclass":
x = tf.nn.softmax(x)
output = tf.reshape(x, (-1, self.num_class))
else:
# regression
output = x
return output
def compute_output_shape(self, input_shape):
if self.task == "multiclass":
return (None, self.num_class)
else:
return (None, 1)
def get_config(self, ):
base_config = super().get_config()
config = {'task': self.task, 'use_bias': self.use_bias, 'num_class': self.num_class}
return {**base_config, **config}
```
#### File: inn/models/BaseModel.py
```python
from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
import tensorflow as tf
from tensorflow.python.feature_column.feature_column_v2 import \
FeatureColumn, NumericColumn, CategoricalColumn, SequenceCategoricalColumn, EmbeddingColumn, \
BucketizedColumn
class BaseModel(object):
def __init__(self, # todo: 初始化基础属性
feature_columns: List[FeatureColumn] = None,
task='binary',
num_class=None,
early_stopping_epochs=3):
self.feature_columns = feature_columns
self.task = task
if task == 'multiclass':
assert num_class is not None, "num_class must not be None"
self.num_class = num_class
self.early_stopping_epochs = early_stopping_epochs
@abstractmethod
def model(self):
raise NotImplementedError()
def compile(self,
optimizer='rmsprop',
loss=None,
metrics=None,
loss_weights=None,
sample_weight_mode=None,
weighted_metrics=None,
target_tensors=None,
distribute=None,
**kwargs, ):
self.model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics,
loss_weights=loss_weights,
sample_weight_mode=sample_weight_mode,
weighted_metrics=weighted_metrics,
target_tensors=target_tensors,
distribute=distribute,
**kwargs, )
def plot(self, to_file='model.png',
show_shapes=False,
show_layer_names=True,
rankdir='TB',
expand_nested=False,
dpi=96):
tf.keras.utils.plot_model(self.model, to_file=to_file,
show_shapes=show_shapes,
show_layer_names=show_layer_names,
rankdir=rankdir,
expand_nested=expand_nested,
dpi=dpi)
def callbacks(self):
callbacks_list = [
tf.keras.callbacks.ReduceLROnPlateau(factor=0.9, patience=2, verbose=1, min_lr=0.0001),
# annealer = LearningRateScheduler(lambda x: min(0.01 * 0.9 ** x, 0.001), verbose=1)
tf.keras.callbacks.ModelCheckpoint("filepath",
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=False,
mode='auto',
save_freq='epoch'),
tf.keras.callbacks.EarlyStopping(monitor='val_loss',
min_delta=0,
patience=self.early_stopping_epochs,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False)
]
return callbacks_list
# todo: 学习率clr_callback, WandbCallback
```
#### File: tricks/online_info/get_label.py
```python
import numpy as np
import pandas as pd
def get_threshold_or_label(preds, psr, only_return_threshold=False):
"""默认: 指标>阈值为正样本
:param psr: Positive sample ratio
:return:
"""
threshold = pd.Series(preds).quantile(1 - psr)
if only_return_threshold:
return threshold
else:
return np.where(preds > threshold, 1, 0)
# def threshold_search(y_true, y_proba):
# best_threshold = 0
# best_score = 0
# for threshold in tqdm([i * 0.01 for i in range(100)]):
# score = f1_score(y_true=y_true, y_pred=y_proba > threshold)
# if score > best_score:
# best_threshold = threshold
# best_score = score
# search_result = {'threshold': best_threshold, 'f1': best_score}
# return search_result
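
if __name__ == '__main__':
    # Minimal usage sketch: mark roughly the top 30% of scores as positive (values are illustrative).
    preds = np.random.random(1000)
    labels = get_threshold_or_label(preds, psr=0.3)
    print(labels.mean())  # close to 0.3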
``` |
{
"source": "Jie-Yuan/DeepTricks",
"score": 3
} |
#### File: deeptricks/layers/DNN.py
```python
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.initializers import Zeros, glorot_normal
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras.regularizers import l2
class DNN(Layer):
def __init__(self, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False, seed=1024, **kwargs):
self.hidden_units = hidden_units
self.activation = activation
self.dropout_rate = dropout_rate
self.seed = seed
self.l2_reg = l2_reg
self.use_bn = use_bn
self.hidden_units_index = range(len(self.hidden_units))
super(DNN, self).__init__(**kwargs)
def build(self, input_shape):
input_size = input_shape[-1]
hidden_units = [int(input_size)] + list(self.hidden_units)
# range(len(self.hidden_units))
self.kernels = []
self.bias = []
for i in range(len(self.hidden_units)):
kernel = self.add_weight(
name='kernel' + str(i),
shape=(hidden_units[i], hidden_units[i + 1]),
initializer=glorot_normal(seed=self.seed),
regularizer=l2(self.l2_reg),
trainable=True
)
bias = self.add_weight(
name='bias' + str(i),
shape=(hidden_units[i + 1],),  # bias size must match the output dim of layer i
initializer=Zeros(),
trainable=True
)
self.kernels.append(kernel)
self.bias.append(bias)
if self.use_bn:
# build one independent layer per hidden layer; list multiplication would reuse a single instance
self.bn_layers = [tf.keras.layers.BatchNormalization() for _ in self.hidden_units]
self.dropout_layers = [tf.keras.layers.Dropout(self.dropout_rate) for _ in self.hidden_units]
self.activation_layers = [tf.keras.layers.Activation(self.activation) for _ in self.hidden_units]  # plain keras Activation; no custom activation_layer helper exists in this module
super(DNN, self).build(input_shape) # Be sure to call this somewhere! 传递信息?
def call(self, inputs, training=None, **kwargs):
deep_input = inputs
for i in range(len(self.hidden_units)):
fc = tf.nn.bias_add(tf.tensordot(deep_input, self.kernels[i], axes=(-1, 0)), self.bias[i])
fc = self.bn_layers[i](fc, training=training) if self.use_bn else fc
fc = self.activation_layers[i](fc)
fc = self.dropout_layers[i](fc, training=training)
deep_input = fc  # feed this layer's output into the next layer
return fc
def compute_output_shape(self, input_shape):
if len(self.hidden_units) > 0:
shape = input_shape[:-1] + (self.hidden_units[-1],)
else:
shape = input_shape
return tuple(shape)
def get_config(self, ):
config = {
'activation': self.activation,
'hidden_units': self.hidden_units,
'l2_reg': self.l2_reg,
'use_bn': self.use_bn,
'dropout_rate': self.dropout_rate,
'seed': self.seed
}
base_config = super(DNN, self).get_config() # TODO: 更新配置信息
return {**base_config, **config}
# return dict(list(base_config.items()) + list(config.items()))
```
#### File: deeptricks/layers/__init__.py
```python
from functools import partial
from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
from keras.layers import Dense
# last layer: 'softmax' or 'sigmoid'
def getDenseList(unitsList, activation=None):
assert isinstance(unitsList, Iterable)
dense = partial(Dense, activation=activation)
return map(dense, unitsList)
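
if __name__ == '__main__':
    # Minimal usage sketch: build three Dense layers sharing one activation (units are illustrative).
    print(list(getDenseList([128, 64, 1], activation='relu')))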
``` |
{
"source": "Jie-Yuan/finch",
"score": 2
} |
#### File: tensorflow/attn_is_all_u_need/model.py
```python
from config import args
from modules import *
import tensorflow as tf
def forward_pass(sources, targets, params, reuse=False):
with tf.variable_scope('forward_pass', reuse=reuse):
pos_enc = _get_position_encoder()
# ENCODER
en_masks = tf.sign(sources)
with tf.variable_scope('encoder_embedding', reuse=reuse):
encoded = embed_seq(
sources, params['source_vocab_size'], args.hidden_units, zero_pad=True, scale=True)
with tf.variable_scope('encoder_position_encoding', reuse=reuse):
encoded += pos_enc(sources, en_masks, args.hidden_units)
with tf.variable_scope('encoder_dropout', reuse=reuse):
encoded = tf.layers.dropout(encoded, args.dropout_rate, training=(not reuse))
for i in range(args.num_blocks):
with tf.variable_scope('encoder_attn_%d'%i, reuse=reuse):
encoded = multihead_attn(queries=encoded, keys=encoded, q_masks=en_masks, k_masks=en_masks,
num_units=args.hidden_units, num_heads=args.num_heads, dropout_rate=args.dropout_rate,
future_binding=False, reuse=reuse, activation=None)
with tf.variable_scope('encoder_feedforward_%d'%i, reuse=reuse):
encoded = pointwise_feedforward(encoded, num_units=[4*args.hidden_units, args.hidden_units],
activation=params['activation'])
# DECODER
decoder_inputs = _shift_right(targets, params['start_symbol'])
de_masks = tf.sign(decoder_inputs)
if args.tied_embedding:
with tf.variable_scope('encoder_embedding', reuse=True):
decoded = embed_seq(decoder_inputs, params['target_vocab_size'], args.hidden_units,
zero_pad=True, scale=True)
else:
with tf.variable_scope('decoder_embedding', reuse=reuse):
decoded = embed_seq(
decoder_inputs, params['target_vocab_size'], args.hidden_units, zero_pad=True, scale=True)
with tf.variable_scope('decoder_position_encoding', reuse=reuse):
decoded += pos_enc(decoder_inputs, de_masks, args.hidden_units)
with tf.variable_scope('decoder_dropout', reuse=reuse):
decoded = tf.layers.dropout(decoded, args.dropout_rate, training=(not reuse))
for i in range(args.num_blocks):
with tf.variable_scope('decoder_self_attn_%d'%i, reuse=reuse):
decoded = multihead_attn(queries=decoded, keys=decoded, q_masks=de_masks, k_masks=de_masks,
num_units=args.hidden_units, num_heads=args.num_heads, dropout_rate=args.dropout_rate,
future_binding=True, reuse=reuse, activation=None)
with tf.variable_scope('decoder_attn_%d'%i, reuse=reuse):
decoded = multihead_attn(queries=decoded, keys=encoded, q_masks=de_masks, k_masks=en_masks,
num_units=args.hidden_units, num_heads=args.num_heads, dropout_rate=args.dropout_rate,
future_binding=False, reuse=reuse, activation=None)
with tf.variable_scope('decoder_feedforward_%d'%i, reuse=reuse):
decoded = pointwise_feedforward(decoded, num_units=[4*args.hidden_units, args.hidden_units],
activation=params['activation'])
# OUTPUT LAYER
if args.tied_proj_weight:
b = tf.get_variable('bias', [params['target_vocab_size']], tf.float32)
_scope = 'encoder_embedding' if args.tied_embedding else 'decoder_embedding'
with tf.variable_scope(_scope, reuse=True):
shared_w = tf.get_variable('lookup_table')
decoded = tf.reshape(decoded, [-1, args.hidden_units])
logits = tf.nn.xw_plus_b(decoded, tf.transpose(shared_w), b)
logits = tf.reshape(logits, [tf.shape(sources)[0], -1, params['target_vocab_size']])
else:
with tf.variable_scope('output_layer', reuse=reuse):
logits = tf.layers.dense(decoded, params['target_vocab_size'], reuse=reuse)
return logits
def _model_fn_train(features, mode, params, logits):
with tf.name_scope('backward'):
targets = features['target']
masks = tf.to_float(tf.not_equal(targets, 0))
if args.label_smoothing:
loss_op = label_smoothing_sequence_loss(
logits=logits, targets=targets, weights=masks, label_depth=params['target_vocab_size'])
else:
loss_op = tf.contrib.seq2seq.sequence_loss(
logits=logits, targets=targets, weights=masks)
if args.lr_decay_strategy == 'noam':
step_num = tf.train.get_global_step() + 1 # prevents zero global step
lr = _get_noam_lr(step_num)
elif args.lr_decay_strategy == 'exp':
lr = tf.train.exponential_decay(1e-3, tf.train.get_global_step(), 100000, 0.1)
else:
raise ValueError("lr decay strategy must be one of 'noam' and 'exp'")
log_hook = tf.train.LoggingTensorHook({'lr': lr}, every_n_iter=100)
train_op = tf.train.AdamOptimizer(lr).minimize(loss_op, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss_op, train_op=train_op, training_hooks=[log_hook])
def _model_fn_predict(features, mode, params):
def cond(i, x, temp):
return i < args.target_max_len
def body(i, x, temp):
logits = forward_pass(features['source'], x, params, reuse=True)
ids = tf.argmax(logits, -1)[:, i]
ids = tf.expand_dims(ids, -1)
temp = tf.concat([temp[:, 1:], ids], -1)
x = tf.concat([temp[:, -(i+1):], temp[:, :-(i+1)]], -1)
x = tf.reshape(x, [tf.shape(temp)[0], args.target_max_len])
i += 1
return i, x, temp
_, res, _ = tf.while_loop(cond, body, [tf.constant(0), features['target'], features['target']])
return tf.estimator.EstimatorSpec(mode=mode, predictions=res)
def tf_estimator_model_fn(features, labels, mode, params):
logits = forward_pass(features['source'], features['target'], params)
if mode == tf.estimator.ModeKeys.TRAIN:
_ = forward_pass(features['source'], features['target'], params, reuse=True)
return _model_fn_train(features, mode, params, logits)
if mode == tf.estimator.ModeKeys.PREDICT:
return _model_fn_predict(features, mode, params)
def _shift_right(targets, start_symbol):
start_symbols = tf.cast(tf.fill([tf.shape(targets)[0], 1], start_symbol), tf.int64)
return tf.concat([start_symbols, targets[:, :-1]], axis=-1)
def _get_position_encoder():
if args.position_encoding == 'non_param':
pos_enc = sinusoidal_position_encoding
elif args.position_encoding == 'param':
pos_enc = learned_position_encoding
else:
raise ValueError("position encoding has to be either 'sinusoidal' or 'learned'")
return pos_enc
def _get_noam_lr(step_num):
return tf.rsqrt(tf.to_float(args.hidden_units)) * tf.minimum(
tf.rsqrt(tf.to_float(step_num)),
tf.to_float(step_num) * tf.convert_to_tensor(args.warmup_steps ** (-1.5)))
```
#### File: tensorflow/end2end_mn/model.py
```python
from config import args
import tensorflow as tf
import numpy as np
def model_fn(features, labels, mode, params):
if labels is None:
labels = tf.zeros([tf.shape(features['inputs'])[0], params['max_answer_len']], tf.int64)
logits = forward(features, params, is_training=True, seq_inputs=shift_right(labels, params))
predicted_ids = forward(features, params, is_training=False)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predicted_ids)
if mode == tf.estimator.ModeKeys.TRAIN:
loss_op = tf.reduce_mean(tf.contrib.seq2seq.sequence_loss(
logits=logits, targets=labels, weights=tf.ones_like(labels, tf.float32)))
train_op = tf.train.AdamOptimizer().minimize(loss_op,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss_op, train_op=train_op)
def forward(features, params, is_training, seq_inputs=None, reuse=tf.AUTO_REUSE):
with tf.variable_scope('memory_o', reuse=reuse):
memory_o = input_mem(features['inputs'], params, is_training)
with tf.variable_scope('memory_i', reuse=reuse):
memory_i = input_mem(features['inputs'], params, is_training)
with tf.variable_scope('questions', reuse=reuse):
question = quest_mem(features['questions'], params, is_training)
match = tf.matmul(question, tf.transpose(memory_i, [0,2,1]))
match = pre_softmax_masking(match, features['inputs_len'], params['max_input_len'])
match = tf.nn.softmax(match) # (batch, question_maxlen, input_maxlen)
match = post_softmax_masking(match, features['questions_len'], params['max_quest_len'])
response = tf.matmul(match, memory_o)
with tf.variable_scope('memory_o', reuse=True):
embedding = tf.get_variable('lookup_table')
with tf.variable_scope('answer', reuse=reuse):
answer = tf.layers.flatten(tf.concat([response, question], -1))
output = answer_module(features, params, answer, embedding, is_training, seq_inputs)
return output
def input_mem(x, params, is_training):
x = embed_seq(x, params)
x = tf.layers.dropout(x, args.dropout_rate, training=is_training)
pos = position_encoding(params['max_sent_len'], args.hidden_dim)
x = tf.reduce_sum(x * pos, 2)
return x
def quest_mem(x, params, is_training):
x = embed_seq(x, params)
x = tf.layers.dropout(x, args.dropout_rate, training=is_training)
pos = position_encoding(params['max_quest_len'], args.hidden_dim)
return (x * pos)
def answer_module(features, params, answer, embedding, is_training, seq_inputs=None):
answer = tf.layers.dense(answer, args.hidden_dim, name='answer_hidden')
init_state = tf.layers.dropout(answer, args.dropout_rate, training=is_training)
if is_training:
helper = tf.contrib.seq2seq.TrainingHelper(
inputs = tf.nn.embedding_lookup(embedding, seq_inputs),
sequence_length = tf.to_int32(features['answers_len']))
decoder = tf.contrib.seq2seq.BasicDecoder(
cell = GRU('decoder_rnn'),
helper = helper,
initial_state = init_state,
output_layer = tf.layers.Dense(params['vocab_size'], name='vocab_proj'))
decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = decoder)
return decoder_output.rnn_output
else:
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding = embedding,
start_tokens = tf.tile(
tf.constant([params['<start>']], dtype=tf.int32), [tf.shape(init_state)[0]]),
end_token = params['<end>'])
decoder = tf.contrib.seq2seq.BasicDecoder(
cell = GRU('decoder_rnn'),
helper = helper,
initial_state = init_state,
output_layer = tf.layers.Dense(params['vocab_size'], name='vocab_proj'))
decoder_output, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder = decoder,
maximum_iterations = params['max_answer_len'])
return decoder_output.sample_id
def pre_softmax_masking(x, seq_len, max_seq_len):
paddings = tf.fill(tf.shape(x), float('-inf'))
T = x.get_shape().as_list()[1]
masks = tf.sequence_mask(seq_len, max_seq_len, dtype=tf.float32)
masks = tf.tile(tf.expand_dims(masks, 1), [1, T, 1])
return tf.where(tf.equal(masks, 0), paddings, x)
def post_softmax_masking(x, seq_len, max_seq_len):
T = x.get_shape().as_list()[-1]
masks = tf.sequence_mask(seq_len, max_seq_len, dtype=tf.float32)
masks = tf.tile(tf.expand_dims(masks, -1), [1, 1, T])
return (x * masks)
def shift_right(x, params):
batch_size = tf.shape(x)[0]
start = tf.to_int64(tf.fill([batch_size, 1], params['<start>']))
return tf.concat([start, x[:, :-1]], 1)
def GRU(name, rnn_size=None):
rnn_size = args.hidden_dim if rnn_size is None else rnn_size
return tf.nn.rnn_cell.GRUCell(
rnn_size, kernel_initializer=tf.orthogonal_initializer(), name=name)
def embed_seq(x, params, zero_pad=True):
lookup_table = tf.get_variable('lookup_table', [params['vocab_size'], args.hidden_dim], tf.float32)
if zero_pad:
lookup_table = tf.concat((tf.zeros([1, args.hidden_dim]), lookup_table[1:, :]), axis=0)
return tf.nn.embedding_lookup(lookup_table, x)
def position_encoding(sentence_size, embedding_size):
encoding = np.ones((embedding_size, sentence_size), dtype=np.float32)
ls = sentence_size + 1
le = embedding_size + 1
for i in range(1, le):
for j in range(1, ls):
encoding[i-1, j-1] = (i - (le-1)/2) * (j - (ls-1)/2)
encoding = 1 + 4 * encoding / embedding_size / sentence_size
return np.transpose(encoding)
``` |
{
"source": "Jie-Yuan/iNLP",
"score": 2
} |
#### File: explode/strokes/handian.py
```python
import requests
import time
class Handian(object):
def __init__(self):
self.headers = {
"Host": "www.zdic.net",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "zh-TW,zh-CN;q=0.8,zh;q=0.6,en;q=0.4,en-US;q=0.2",
"Accept-Encoding": "gzip, deflate",
"Referer": "http://www.zdic.net/z/15/js/4EBA.htm",
"Cookie": "UM_distinctid=16264efc020144-0cdc7740ca36d5-4323461-1aeaa0-16264efc02125e; "
"ASPSESSIONIDCATTBADD=BIHGOGEBNPJNLMMGMHHJAKOP; ASPSESSIONIDCSSQSCSA=GCIALALAGJGGICPDGLHIFDDK; "
"CNZZDATA524192=cnzz_eid%3D915161487-1522106804-null%26ntime%3D1522193207; cxls=%E6%9E%97; lb%5Fc=mh; "
"lb%5Fb=mh; lb%5Fa=hp; tp=tp1; ASPSESSIONIDASQSRBQC=JDMHPALADHOGFHIPCAICKLNM", # 由于字数限制,这里省略掉cookie,见下文的回答
"Connection": "keep-alive",
# "Connection": "close",
}
self.params = {
"lb_a": "hp",
"lb_b": "mh",
"lb_c": "mh",
"tp": "tp1",
# "q": "我"
"q": None
}
self.cookies = {
"ASPSESSIONIDQQBTQBSA": "AIFKNJKBDMDCNKHIIAEFDLLD",
"ASPSESSIONIDQQCQSATA": "BKEMOFHCOMBMDNCDLHIINGHD",
"ASPSESSIONIDQQCSTBSA": "EGIHAKKBKKPDOFOKBGPODFHK",
"ASPSESSIONIDQQDQTATA": "PPLMDDDBKICEEJANAPIECGHG",
"ASPSESSIONIDQQDRRATA": "LDKKPNNAKMPDPFEGIIANBLJL",
"ASPSESSIONIDQSCQSATA": "NPDPCONAILAPOLMFPFLPJKMH",
"ASPSESSIONIDSQBRSATA": "DOFGMEICOILOJHJENECEOGDA",
"ASPSESSIONIDSQCRSASB": "PKJIAMNBGNCJMONFLNOEHJPD",
"ASPSESSIONIDSSDQTDQB": "MDBILFHCCKBIDADCMPHLLBLC",
"CNZZDATA524192": "cnzz_eid=551846853-1465193430-&ntime=1469453481",
"Hm_lpvt_57315eba0396d9939adbd0045addeae1": "1469451059",
"Hm_lvt_57315eba0396d9939adbd0045addeae1": "1467616616,1469451059",
"cxls": "人|伤|ã¤
|å¤|直|ç´|æ£|æ|ä¸|丨|丯|ç¶|丰|ç|ç|çæ´»|ð¢ª|æ
¹|æ
|åª|æ¤|é|å|é|é¼|å»|ð¯¨|é½",
"cxls": "丨|丯|人|ç¶|丰|伤|ä¸|ç|ç|çæ´»|ð¢ª|æ
¹|æ
|åª|æ¤|é|å|é|é¼|å»|ð¯¨|é½",
"lb_a": "hp",
"lb_b": "mh",
"lb_c": "mh",
"tp": "tp1",
}
def get_url(self, word):
self.params = {
"lb_a": "hp",
"lb_b": "mh",
"lb_c": "mh",
"tp": "tp1",
# "q": "我"
"q": word
}
requests.adapters.DEFAULT_RETRIES = 50
s = requests.session()
s.keep_alive = False
time.sleep(2)
# use the session created above so its retry/keep-alive settings actually apply to the request
response = s.post("http://www.zdic.net/sousuo/", data=self.params,
headers=self.headers, cookies=self.cookies)
return response.url
if __name__ == "__main__":
handian = Handian()
print(handian.get_url("我"))
print(handian.get_url("你"))
print(handian.get_url("烔"))
```
#### File: inlp/similarity/__init__.py
```python
from simhash import Simhash
class SimHaming:
'''利用64位数,计算海明距离'''
def haming_distance(self, code_s1, code_s2):
x = (code_s1 ^ code_s2) & ((1 << 64) - 1)
ans = 0
while x:
ans += 1
x &= x - 1
return ans
'''利用相似度计算方式,计算全文编码相似度'''
def get_similarity(self, a, b):
if a > b:
return b / a
else:
return a / b
def get_features(self, s):
word_list = s
return word_list
'''计算两个全文编码的距离'''
def get_distance(self, code_s1, code_s2):
return self.haming_distance(code_s1, code_s2)
'''对全文进行编码'''
def get_code(self, string):
return Simhash(self.get_features(string)).value
'''计算s1与s2之间的距离'''
def distance(self, s1, s2):
"""
s = '对全文进行分词,提取全文特征,使用词性将虚词等无关字符去重'
word_list=[word.word for word in jieba.posseg.cut(s) if word.flag[0] not in ['u','x','w','o','p','c','m','q']]
s1 = word_list
s2 = ...
"""
code_s1 = self.get_code(s1)
code_s2 = self.get_code(s2)
similarity = (100 - self.haming_distance(code_s1, code_s2) * 100 / 64) / 100
return similarity
simhash = SimHaming().distance
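
if __name__ == '__main__':
    # Minimal usage sketch: inputs are already-tokenized word lists (tokens below are illustrative).
    s1 = ['we', 'love', 'natural', 'language', 'processing']
    s2 = ['we', 'like', 'natural', 'language', 'processing']
    print(simhash(s1, s2))  # similarity in [0, 1]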
```
#### File: inlp/text_preprocessing/KerasBow.py
```python
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
class KerasBow(object):
"""doc
词袋模型:我们可以为数据集中的所有单词制作一张词表,然后将每个单词和一个唯一的索引关联。
每个句子都是由一串数字组成,这串数字是词表中的独立单词对应的个数。
通过列表中的索引,我们可以统计出句子中某个单词出现的次数。
"""
def __init__(self, num_words=20000, maxlen=None):
"""
:param maxlen: 句子序列最大长度
:param num_words: top num_words-1(词频降序):保留最常见的num_words-1词
"""
self.maxlen = maxlen
self.num_words = num_words
self.tokenizer = None
def fit(self, docs):
"""
:param docs: e.g. ['some thing to do', 'some thing to drink'], the same input format as sklearn text vectorizers
"""
print('Create Bag Of Words ...')
self.tokenizer = Tokenizer(self.num_words, lower=False) # 不改变大小写(需提前预处理)
self.tokenizer.fit_on_texts(docs)
print("Get Unique Words In Corpus: %s" % len(self.tokenizer.word_index))
return self
def transform(self, docs):
print('Docs To Sequences ...')
sequences = self.tokenizer.texts_to_sequences(docs)
pad_docs = pad_sequences(sequences, self.maxlen, padding='post')
if self.maxlen is None:
self.maxlen = pad_docs.shape[1]
return pad_docs
def fit_transform(self, docs):
self.fit(docs)
return self.transform(docs)
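
if __name__ == '__main__':
    # Minimal usage sketch: fit on a tiny corpus and pad every document to length 6 (values are illustrative).
    docs = ['some thing to do', 'some thing to drink']
    bow = KerasBow(num_words=100, maxlen=6)
    print(bow.fit_transform(docs))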
``` |
{
"source": "Jie-Yuan/MeUtils",
"score": 3
} |
#### File: examples/betterme/j.py
```python
import requests
class ModelServer(object):
def __init__(self, url='http://10.1.42.180:30061', serving='pmml'):
self.metadata_url = f"{url}/{serving}/serving/metadata"
self.predict_url = f"{url}/{serving}/serving/predict"
fields = self.get_fields()
self.input_fields, self.output_fields, self.target_fields = fields.values()
print(fields)
def get_fields(self):
self._metadata = requests.get(self.metadata_url).json()
_ = self._metadata.get('data', {})
func = lambda field: list(map(lambda x: x['field'].get('name', {}).get('value'), _[field]))
return {field: func(field) for field in ['inputFields', 'outputFields', 'targetFields']}
def predict(self, values=None):
# use the provided feature values, falling back to dummy values 0..n-1 when none are given
json = dict(zip(self.input_fields, values if values is not None else range(len(self.input_fields))))
return requests.post(self.predict_url, json=json).json()
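
# Minimal usage sketch (the default URL is an internal PMML serving endpoint and assumed reachable;
# the feature values below are illustrative and must line up with server.input_fields):
# server = ModelServer()
# print(server.predict([0.1, 0.2, 0.3]))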
```
#### File: examples/betterme/tophub_news.py
```python
import typer
import requests
from lxml.etree import HTML
from meutils.notice.wechat import Bot
cli = typer.Typer(name="Tophub")
def get_dom_tree(url="https://top.baidu.com/board?tab=realtime"):
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
}
r = requests.get(url, headers=headers)
return HTML(r.text)
@cli.command()
def send_news(title='今日热榜', url='https://tophub.today/n/x9ozB4KoXb', bot_key='5f51e925-1257-4f90-88ea-f474bc1e2a05'):
dom_tree = get_dom_tree(url)
titles = dom_tree.xpath(
"""//*[@id="page"]/div[2]/div[2]/div[1]/div[2]/div/div[1]/table/tbody/tr[*]/td[2]/a/text()"""
)[:10]
titles = map(lambda x: x.strip().replace('\n', ' '), titles)
titles = filter(lambda x: len(x) > 6, titles)
titles = '\n'.join(map(lambda x: f"{x}", titles)) # {'#' * 8}
print(titles)
bot = Bot(bot_key)
json = {
"msgtype": "markdown",
"markdown": {
"content": f"""
> # [{title}]({url})\n\n{titles}
""".strip(),
}
}
bot.send(json)
# if __name__ == '__main__':
# send_news(title='证券日报', url='https://tophub.today/n/wkvlP5kvz1')
if __name__ == '__main__':
cli()
```
#### File: examples/clis/a.py
```python
def f1(x):
print(type(x))
print(f'f1: {x}')
def f2(x=2):
print(f'f2: {x}')
class Conf:
a=1
b=2
c=3
if __name__ == '__main__':
from fire import Fire
Fire(Conf)
```
#### File: MeUtils/examples/importlib_.py
```python
import importlib
"""
class C:
def c(self):
pass
"""
params = importlib.import_module('b.c.c') # 绝对导入
params_ = importlib.import_module('.c.c', package='b') # 相对导入
```
#### File: MeUtils/examples/memory_profiler_demo.py
```python
from memory_profiler import profile
@profile
def my_func():
a = [1] * (10 ** 6)
b = [2] * (2 * 10 ** 7)
del b
return a
if __name__ == '__main__':
my_func()
# mprof run <script>
# mprof plot
```
#### File: MeUtils/examples/zk_hot_update.py
```python
import time
from kazoo.retry import KazooRetry
from kazoo.client import KazooClient
# def func():
#
# print(time.time())
# with open('./config.txt') as f:
# return f.read().strip()
#
# print(KazooRetry()(func))
#
# while 1:
# time.sleep(2)
# KazooRetry()(func)
zk = KazooClient(hosts="127.0.0.1:2181")
```
#### File: aizoo/layers/Prediction.py
```python
import tensorflow as tf
from .utils import get_activation_by_num_class
class Prediction(tf.keras.layers.Layer):
def __init__(self, num_class=2, name='Prediction', **kwargs):
super().__init__(name=name, **kwargs)
self.num_class = num_class
self.activation = get_activation_by_num_class(num_class)
def build(self, input_shape):
super().build(input_shape)
units = self.num_class if self.num_class > 2 else 1 # 多分类输出 num_class 维
self.fc = tf.keras.layers.Dense(units, activation=self.activation)
def call(self, inputs, **kwargs):
return self.fc(inputs)
def compute_output_shape(self, input_shape):
if self.task == "multiclass":
return (None, self.num_class)
else:
return (None, 1)
def get_config(self, ):
base_config = super().get_config()
config = {'num_class': self.num_class}
return {**base_config, **config}
```
#### File: aizoo/video/video.py
```python
from meutils.pipe import *
from moviepy.editor import *
# TODO: 抽帧去重
def video2picture(video='/fds/1_Work/2_DownVideo/videos/互联网_yidian_V_07d0oZik.mp4', top_duration=180):
p = Path(video)
pic_dir = p.name.split('.')[0]  # directory named after the video file (part before the first dot)
(p.parent / pic_dir).mkdir(exist_ok=True)
with VideoFileClip(video) as clip:
duration = int(clip.duration)
for i in tqdm(range(min(duration, top_duration))):
clip_ = clip.subclip(i, i + 1)
clip_.save_frame(p.parent / pic_dir / f'{i}.png')
def video2audio(path_pair, verbose=False, subclip=None, ffmpeg_params=["-f", "mp3"]):
"""
clip = VideoFileClip('蛋清和蛋黄是这么分离的.720p').subclip(3, 7)
:param paths: (video_path, audio_path)
:param subclip:
:param ffmpeg_params:
:return:
"""
video_path, audio_path = path_pair
with VideoFileClip(video_path) as clip:
duration = int(clip.duration)
if subclip:
s, e = subclip[0], duration if subclip is None or duration < subclip[1] else subclip[1]
clip = clip.subclip(s, e)
clip.audio.write_audiofile(
audio_path, fps=None, nbytes=2, buffersize=2000,
codec=None, bitrate=None, ffmpeg_params=ffmpeg_params,
write_logfile=False, verbose=verbose, logger='bar' if verbose else None
)
```
#### File: meutils/annzoo/ann.py
```python
from meutils.pipe import *
from milvus import Milvus, DataType
import copy
# IndexType.
# from milvus.client.exceptions import CollectionNotExistException
"""
client.drop_index
client.get_config
client.list_id_in_segment
client.load_collection???
"""
class Collection(object):
def __init__(self, name=None, client=None):
self.name = name
self.client = client
self.count_entities = self.count
self.count_documents = self.count
self.vector_name = self.get_vec_field_name()
def __str__(self):
has_collection = self.client.has_collection(self.name)
if not has_collection:
logger.warning(f"{self.name} doesn't exist")
return f"Collection({self.name})"
def batch_insert(self, df_entity: pd.DataFrame, batch_size=100000):
"""
:param df_entity: id, sid, vec, part 与 collection 字段一致
:param batch_size:
:return:
"""
entity_names = [_['name'] for _ in self.collection_info['fields']]
logger.warning(f"EntityNames: {entity_names}")
# 分区
df_entity = df_entity.reset_index(drop=True)
n = len(df_entity)
num_part = n // batch_size + 1 if n % batch_size else n // batch_size
ids = []
for i in tqdm(range(num_part), desc='BatchInsert'):
df = df_entity.iloc[i * batch_size:(i + 1) * batch_size, :]
entities = []
for record in self.collection_info['fields']:
entities.append({
'name': record['name'],
'type': record['type'],
'values': df[record['name']].values
})
ids += self.client.insert(self.name, entities, ids=df['id'] if 'id' in df else None)
time.sleep(3)
return ids
def search(
self, vectors=np.random.random((1, 256)),
topk=10,
nprobe=1,
scalar_list: List[dict] = None):
q = self.get_search_query(vectors, topk, nprobe, scalar_list)
entities = self.client.search(self.name, q)[0]
return entities # todo: 增加阈值过滤
# entities = ann.client.search("demo", query_hybrid)[0]
# id2score = dict(zip(entities.ids, entities.distances))
# docs = mongo_collection.find({"xindaoid": {'$in': entities.ids}})
# df = pd.DataFrame(list(docs)).drop(['_id', 'category_', 'vector'], 1)
# df['distance'] = df['xindaoid'].map(id2score)
def batch_search(self, vectors=np.random.random((1, 256)), topk=10, nprobe=1,
scalar_list: List[dict] = None):
q = self.get_search_query(vectors, topk, nprobe, scalar_list)
entities = self.client.search(self.name, q)
return entities
def get_entity_by_id(self, ids, fields=None):
return self.client.get_entity_by_id(self.name, ids, fields)
def delete_entity_by_id(self, ids):
self.client.delete_entity_by_id(self.name, ids)
@property
def count(self):
return self.client.count_entities(self.name)
@property
def collection_info(self):
return self.client.get_collection_info(self.name)
@property
def collection_stats(self):
return self.client.get_collection_stats(self.name)
def get_vec_field_name(self):
fields = self.collection_info['fields']
vec_field = [_ for _ in fields if str(_.get('type', '')).__contains__('VECTOR')][0]
return vec_field['name']
def get_search_query(self, vectors, topk=10, nprobe=1, scalar_list: List[dict] = None):
"""
ann.demo.search(np.random.random((1, 10)), scalar_list=[{'term': {'scalar': [1,2,3,4]}}])
:param vectors:
:param topk:
:param nprobe:
:param scalar_list:
:return:
"""
q = {
"bool": {
"must": [
{
"vector": {
self.vector_name: {
"topk": topk,
"query": vectors,
"metric_type": "IP",
"params": {
"nprobe": nprobe
}
}
}
},
]
}
}
if scalar_list is not None: # {"term": {"标量字段": [1,2,3]}}
for _ in scalar_list:
q['bool']['must'].append(_)
return q
class ANN(object):
def __init__(self, host='10.46.242.23', port='19530', handler="GRPC", pool="SingletonThread", show_info=False):
self.host = host
self.client = Milvus(host, port, handler=handler, pool=pool, pool_size=32) # 线程池
if show_info:
logger.info(
{
"ClientVersion": self.client.client_version(),
"ServerVersion": self.client.server_version()
}
)
def __getattr__(self, collection_name) -> Collection:
return Collection(collection_name, self.client)
def create_collection(self, collection_name, fields, auto_id=True, segment_row_limit=4096, overwrite=True):
"""
:param collection_name:
:param fields: # type: BOOL INT32 INT64 FLOAT BINARY_VECTOR FLOAT_VECTOR
fields = [
{
"name": "scalar",
"type": 'INT32',
"params": {},
"indexes": [{}]
},
{
"name": "vector",
"type": 'FLOAT_VECTOR',
"params": {"dim": 768},
"indexes": [{"index_type": 'IVF_FLAT', 'metric_type': 'IP', 'params': {'nlist': 1024}, 'index_file_size': 1024}]
}
]
# index_file_size不确定放在哪生效
:param auto_id:
:param segment_row_limit: range 4096 ~ 4194304
:return:
"""
fields = copy.deepcopy(fields) # fields[:]
if self.client.has_collection(collection_name):
if overwrite:
logger.warning(f"{collection_name} already exists! to drop.")
self.client.drop_collection(collection_name, timeout=300)
else:
return f"{collection_name} already exists!"
vec_field = [_ for _ in fields if _.get('type', '').__contains__('VECTOR')][0]
# assert len(vec_fields) > 0, "至少有一个矢量"
for _ in fields:
if 'type' in _:
_['type'] = DataType.__getattr__(_['type'])
collection_param = {
"fields": fields,
"auto_id": auto_id,
"segment_row_limit": segment_row_limit,
}
# collection vector index
self.client.create_collection(collection_name, fields=collection_param)
self.client.create_index(collection_name, vec_field['name'], vec_field['indexes'][0])
logger.info(f"{self.client.get_collection_info(collection_name)}")
@property
def collection_names(self):
return self.client.list_collections()
def __create_index(self, collection_name, field_name, index_type='IVF_FLAT', metric_type='IP', index_params=None):
if index_params is None:
index_params = {'nlist': 1024}
params = {
'index_type': index_type,
# 'index_file_size': 1024, # TODO: 不确定放在哪生效
'params': index_params,
'metric_type': metric_type,
}
self.client.create_index(collection_name, field_name, params) # field_name='embedding'
if __name__ == '__main__':
ann = ANN('10.119.18.201', show_info=True)
fields = [
{
"name": "scalar",
"type": 'INT32',
"params": {},
"indexes": [{}]
},
{
"name": "vector",
"type": 'FLOAT_VECTOR',
"params": {"dim": 256},
"indexes": [
{"index_type": 'IVF_FLAT', 'metric_type': 'IP', 'params': {'nlist': 1024}, 'index_file_size': 1024}]
}
]
ann.create_collection('demo', fields)
print(ann.demo)
print(ann.demo.collection_info)
print(ann.demo.vector_name)
# print(ann.demo.collection_stats)
# df列名必须与fields一致
df = pd.DataFrame(enumerate('abcdefgh'), columns=['scalar', 'sid']).assign(
vector=np.random.random((8, 256)).tolist())
ann.demo.batch_insert(df)
```
#### File: MeUtils/meutils/array_utils.py
```python
def multi_list(ls, n=10):
return sum(([i] * n for i in ls), [])
if __name__ == '__main__':
ls = range(5)
print(multi_list(ls, 3))
```
#### File: MeUtils/meutils/cli.py
```python
from meutils.pipe import *
class CLIRun(object):
"""doc"""
def __init__(self, **kwargs):
pass
def apps_list(self, apps='apps'):
"""
apps/apps_streamlit
"""
def pip(self, *packages):
"""
mecli - pip "meutils appzoo"
:param packages:
:return:
"""
packages = " ".join(packages)
cmd = f"pip install -U --no-cache-dir -i https://mirrors.aliyun.com/pypi/simple {packages} && pip install -U {packages}"
logger.info(cmd)
os.system(cmd)
# todo: 增加常用包更新
def cli():
fire.Fire(CLIRun)
if __name__ == '__main__':
print(CLIRun().pip())
#
# import fire
#
# def add(x, y):
# return x + y
#
# def multiply(x, y):
# return x * y
#
# if __name__ == '__main__':
# fire.Fire()
# We can use this like so:
#
# $ python example.py add 10 20
# 30
# $ python example.py multiply 10 20
# 200
```
#### File: meutils/clis/conf.py
```python
from meutils.pipe import *
# 定义参数
class TrainConf(BaseConfig):
epoch = 10
batch_size = 128
def train(**kwargs):
logger.info("开始训练")
time.sleep(3)
# 使用参数
def run(**kwargs):
logger.info(f"输入参数: {kwargs}")
c = TrainConf.parse_obj(kwargs)
logger.info(f"使用参数: {c.dict()}")
train(**c.dict())
# 传入参数
conf_cli = lambda: fire.Fire(run) # <conf_cli> --epoch 11 --batch_size 111
# fire.Fire()需要指定命令对象
```
#### File: meutils/cmds/hdfs_cmd.py
```python
from meutils.pipe import *
class HDFS(object):
HADOOP_HOME = os.environ.get('HADOOP_HOME', '~/infra-client/bin')
HDFS_CLUSTER_NAME = os.environ.get('HDFS_CLUSTER_NAME', 'zjyprc-hadoop')
HDFS_CMD = f"{HADOOP_HOME}/hdfs --cluster {HDFS_CLUSTER_NAME} dfs" # f"{HADOOP_HOME}/hadoop --cluster {HDFS_CLUSTER_NAME} fs"
@classmethod
def check_path_isexist(cls, path):
cmd = f"-test -e {path}" # 包含?-test -d
status, output = cls.magic_cmd(cmd)
rst = False if status != 0 else True
logger.info(f'Path Exist: {rst}')
return rst
@classmethod
def touchz(cls, path):
"""
:param path: /user/h_browser/algo/yuanjie/jars/xx.txt
:return:
"""
cmd = f"-touchz {path}"
return cls.magic_cmd(cmd)
@classmethod
def wc_l(cls, path):
"""
:param path: /user/h_data_platform/platform/browser/push_group/locals/江苏_南京/date=20210120/*
:return:
"""
cmd = f"-cat {path} | wc -l"
return cls.magic_cmd(cmd)
@classmethod
def magic_cmd(cls, cmd):
"""
:param cmd: -cat /user/h_browser/algo/yuanjie/jars/vec.csv
:return:
"""
cmd = f"{cls.HDFS_CMD} {cmd}"
return magic_cmd(cmd)
@classmethod
def push2hdfs(cls, input, output):
cls.magic_cmd(f"-mkdir -p {output}")
cls.magic_cmd(f"-put -f {input} {output}")
cls.touchz(f"{output}/_SUCCESS")
```
#### File: meutils/db/neo4j.py
```python
from meutils.pipe import *
from py2neo import Graph, Node, Relationship
from concurrent.futures import ThreadPoolExecutor
class Neo4j(object):
"""TODO
添加属性值:关系的属性
"""
def __init__(self, profile="bolt://xx:7687", username='neo4j', password='mi'):
self.graph = Graph(profile, username=username, password=password)
# self.graph.delete_all()
def create_nodes(self, df_nodes, label="Demo", max_workers=30):
"""
:param label: Node Label 可以理解为一个集合
:param node_list: [(k, v, r), ] 三元组
:return:
"""
df_nodes.columns = ['k', 'v', 'r']
groups = df_nodes.groupby('k')
print(f"Num Group: {len(groups)}")
func = lambda group: [self._create_node(label, nodes) for nodes in tqdm(group[1].values)]
with ThreadPoolExecutor(max_workers, thread_name_prefix=f"{label}__") as pool:
_ = pool.map(func, tqdm(groups), chunksize=1)
def _create_node(self, label, nodes):
k, v, r = nodes
node_key = self._create(label, k)
node_value = self._create(label, v)
if node_key != node_value:
node_relation = Relationship(node_key, r, node_value) # r也可以是节点
# node_relation['属性'] = 0
self.graph.create(node_relation)
@lru_cache(1024)
def _create(self, label, name):
subgraph = Node(label, name=name)
self.graph.create(subgraph)
return subgraph
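
# Minimal usage sketch (assumes a reachable Neo4j server; the triples and credentials are illustrative):
# df = pd.DataFrame([('apple', 'fruit', 'is_a'), ('apple', 'red', 'color')], columns=['k', 'v', 'r'])
# Neo4j(profile='bolt://localhost:7687', password='password').create_nodes(df, label='Demo')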
```
#### File: meutils/request_utils/crawler.py
```python
from lxml.etree import HTML
from meutils.request_utils import request
class Crawler(object):
def __init__(self, url, encoding=None, *args, **kwargs):
self.url = url
self.html = self.get_html(url, encoding)
def xpath(self, _path="//text()", **_variables):
return self.html.xpath(_path, **_variables)
@staticmethod
def get_html(url, encoding):
r = request(url, parser=None, encoding=encoding)
return HTML(r.text)
if __name__ == '__main__':
url = "https://top.baidu.com/board?tab=realtime"
_ = Crawler(url).xpath('//*[@id="sanRoot"]/main/div[2]/div/div[2]/div[*]/div[2]/a/div[1]//text()')
print("\n".join(_))
```
#### File: meutils/request_utils/__init__.py
```python
import requests
from loguru import logger
from tenacity import retry, stop_after_delay, stop_after_attempt, wait_fixed
@retry(wait=wait_fixed(3), # 重试之前等待3秒
stop=stop_after_delay(7) | stop_after_attempt(3), # 同时满足用 | 没毛病:重试7秒重试3次
retry_error_callback=lambda log: logger.error(log),
reraise=True)
# @lru_cache()
def request(url=None, json=None, parser=lambda x: x, encoding=None, **kwargs):
"""
:param url:
:param json:
:param parser: None 的时候返回r,否则返回 parser(r.json())
:param kwargs:
:return:
"""
method = 'post' if json is not None else 'get' # 特殊情况除外
logger.info(f"Request Method: {method}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
}
r = requests.request(method, url, json=json, headers=headers)
r.encoding = encoding if encoding else r.apparent_encoding
if parser is None:
return r
return parser(r.json())
```
#### File: meutils/request_utils/results.py
```python
from meutils.pipe import *
from meutils.zk_utils import zk_cfg
from meutils.request_utils import request
def get_ac(docid, parser=lambda x: x.get('item', {})):
return request(f"{zk_cfg.ac_url}/{docid}", parser=parser)
def get_acs(docids, max_workers=10, parser=lambda x: x.get('item', {}).get('title')):
func = functools.partial(get_ac, parser=parser)
return docids | xThreadPoolExecutor(func, max_workers) | xlist
def get_simbert_vectors(titles='bert向量化', max_workers=1, is_lite='0'):
"""
适合小批量请求
:param titles:
:param max_workers:
:param is_lite:
:return:
"""
if isinstance(titles, str):
titles = [titles]
max_workers = min(len(titles), max_workers)
titles_list = np.array_split(titles, len(titles) // 64 + 1) # list
request_func = lambda titles: request(f"{zk_cfg.simbert_url}",
json={"texts": list(titles), "is_lite": is_lite}).get('vectors')
vectors_list = titles_list | xThreadPoolExecutor(request_func, max_workers)
return np.row_stack(vectors_list)
if __name__ == '__main__':
print(get_acs(['fengxing_144094389']))
# print(get_simbert_vectors('bert向量化', is_lite='1').shape)
```
#### File: MeUtils/meutils/smooth_utils.py
```python
from meutils.pipe import *
"""
TODO: 增加可视化
"""
def exponential_decay(t, life_cycle=24 * 7, start=1, end=0):
"""牛顿冷却定律
拟合随时间指数衰减的过程
https://blog.csdn.net/xiaokang06/article/details/78076925
https://blog.csdn.net/zhufenghao/article/details/80879260
:param t: 0~delta
:param life_cycle: 衰减时间长度/生命周期
:param start: 起始值
:param end: 结束值
:return:
"""
α = np.log(start / (end + 1e-8)) / life_cycle
t0 = - np.log(start) / α
decay = np.exp(- α * (t + t0))
return decay
def walson_ctr(num_click, num_pv, z=1.96):
""":arg
威尔逊
https://mp.weixin.qq.com/s/rLP1wsS0a71q5RA7NQcjdQ
"""
p = num_click / num_pv
if p > 0.9:
return 0.0
n = num_pv
A = p + z ** 2 / (2 * n)
B = np.sqrt(p * (1 - p) / n + z ** 2 / (4 * (n ** 2)))
C = z * B
D = 1 + z ** 2 / n
ctr = (A - C) / D
return ctr
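
if __name__ == '__main__':
    # Minimal usage sketch: decay over a one-week (168-hour) life cycle and a smoothed CTR
    # (click/pv counts are illustrative).
    print(exponential_decay(0), exponential_decay(168))  # ~1.0 at t=0, ~0.0 at the end of the cycle
    print(walson_ctr(num_click=5, num_pv=100))           # Wilson lower bound, below the raw 0.05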
``` |
{
"source": "Jie-Yuan/optuna-dashboard",
"score": 2
} |
#### File: optuna-dashboard/python_tests/test_api.py
```python
import json
from unittest import TestCase
import optuna
from optuna_dashboard.app import create_app
from .wsgi_client import send_request
def objective(trial: optuna.trial.Trial) -> float:
x = trial.suggest_float("x", -1, 1)
return x
class APITestCase(TestCase):
def test_get_study_summaries(self) -> None:
storage = optuna.storages.InMemoryStorage()
storage.create_new_study("foo1")
storage.create_new_study("foo2")
app = create_app(storage)
status, _, body = send_request(
app,
"/api/studies/",
"GET",
content_type="application/json",
)
self.assertEqual(status, 200)
study_summaries = json.loads(body)["study_summaries"]
self.assertEqual(len(study_summaries), 2)
def test_get_study_details_without_after_param(self) -> None:
study = optuna.create_study()
study_id = study._study_id
study.optimize(objective, n_trials=2)
app = create_app(study._storage)
status, _, body = send_request(
app,
f"/api/studies/{study_id}",
"GET",
content_type="application/json",
)
self.assertEqual(status, 200)
all_trials = json.loads(body)["trials"]
self.assertEqual(len(all_trials), 2)
def test_get_study_details_with_after_param_partial(self) -> None:
study = optuna.create_study()
study_id = study._study_id
study.optimize(objective, n_trials=2)
app = create_app(study._storage)
status, _, body = send_request(
app,
f"/api/studies/{study_id}",
"GET",
queries={"after": "1"},
content_type="application/json",
)
self.assertEqual(status, 200)
all_trials = json.loads(body)["trials"]
self.assertEqual(len(all_trials), 1)
def test_get_study_details_with_after_param_full(self) -> None:
study = optuna.create_study()
study_id = study._study_id
study.optimize(objective, n_trials=2)
app = create_app(study._storage)
status, _, body = send_request(
app,
f"/api/studies/{study_id}",
"GET",
queries={"after": "2"},
content_type="application/json",
)
self.assertEqual(status, 200)
all_trials = json.loads(body)["trials"]
self.assertEqual(len(all_trials), 0)
def test_get_study_details_with_after_param_illegal(self) -> None:
study = optuna.create_study()
study_id = study._study_id
study.optimize(objective, n_trials=2)
app = create_app(study._storage)
status, _, body = send_request(
app,
f"/api/studies/{study_id}",
"GET",
queries={"after": "-1"},
content_type="application/json",
)
self.assertEqual(status, 400)
def test_create_study(self) -> None:
for name, directions, expected_status in [
("single-objective success", ["minimize"], 201),
("multi-objective success", ["minimize", "maximize"], 201),
("invalid direction name", ["invalid-direction", "maximize"], 400),
]:
with self.subTest(name):
storage = optuna.storages.InMemoryStorage()
self.assertEqual(len(storage.get_all_study_summaries()), 0)
app = create_app(storage)
request_body = {
"study_name": "foo",
"directions": directions,
}
status, _, _ = send_request(
app,
"/api/studies",
"POST",
content_type="application/json",
body=json.dumps(request_body),
)
self.assertEqual(status, expected_status)
if expected_status == 201:
self.assertEqual(len(storage.get_all_study_summaries()), 1)
else:
self.assertEqual(len(storage.get_all_study_summaries()), 0)
def test_create_study_duplicated(self) -> None:
storage = optuna.storages.InMemoryStorage()
storage.create_new_study("foo")
self.assertEqual(len(storage.get_all_study_summaries()), 1)
app = create_app(storage)
request_body = {
"study_name": "foo",
"direction": "minimize",
}
status, _, _ = send_request(
app,
"/api/studies",
"POST",
content_type="application/json",
body=json.dumps(request_body),
)
self.assertEqual(status, 400)
self.assertEqual(len(storage.get_all_study_summaries()), 1)
def test_delete_study(self) -> None:
storage = optuna.storages.InMemoryStorage()
storage.create_new_study("foo1")
storage.create_new_study("foo2")
self.assertEqual(len(storage.get_all_study_summaries()), 2)
app = create_app(storage)
status, _, _ = send_request(
app,
"/api/studies/1",
"DELETE",
content_type="application/json",
)
self.assertEqual(status, 204)
self.assertEqual(len(storage.get_all_study_summaries()), 1)
def test_delete_study_not_found(self) -> None:
storage = optuna.storages.InMemoryStorage()
app = create_app(storage)
status, _, _ = send_request(
app,
"/api/studies/1",
"DELETE",
content_type="application/json",
)
self.assertEqual(status, 404)
class BottleRequestHookTestCase(TestCase):
def test_ignore_trailing_slashes(self) -> None:
storage = optuna.storages.InMemoryStorage()
app = create_app(storage)
endpoints = ["/api/studies", "/api/studies/"]
for endpoint in endpoints:
with self.subTest(msg=endpoint):
status, _, body = send_request(
app,
endpoint,
"GET",
content_type="application/json",
)
self.assertEqual(status, 200)
``` |
{
"source": "Jie-Yuan/StreamlitApp",
"score": 3
} |
#### File: appzoo/utils/ocr_utils.py
```python
from PIL import Image
from paddleocr import PaddleOCR, draw_ocr
from appzoo.utils import get_module_path
def ocr_result_image(result, input_image, output_image='output_image.png'):
# https://raw.githubusercontent.com/PaddlePaddle/PaddleOCR/develop/doc/simfang.ttf
image = Image.open(input_image).convert('RGB')
boxes = [line[0] for line in result]
txts = [line[1][0] for line in result]
scores = [line[1][1] for line in result]
im_show = draw_ocr(image, boxes, txts, scores, font_path=get_module_path("../data/simfang.ttf", __file__))
im_show = Image.fromarray(im_show)
im_show.save(output_image)
return output_image
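
# Minimal usage sketch (assumes a local 'input.png'; PaddleOCR downloads its models on first use,
# and depending on the PaddleOCR version the result may be nested one level deeper):
# ocr = PaddleOCR(lang='ch')
# result = ocr.ocr('input.png')
# ocr_result_image(result, 'input.png', 'output_image.png')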
``` |
{
"source": "Jie-Yuan/woe",
"score": 3
} |
#### File: lib/woe/ftrl.py
```python
__author__ = 'boredbird'
import numpy as np
class LR(object):
@staticmethod
def fn(w, x):
'''sigmoid function
'''
return 1.0 / (1.0 + np.exp(-w.dot(x)))
@staticmethod
def loss(y, y_hat):
'''Cross entropy loss function
'''
return np.sum(np.nan_to_num(-y * np.log(y_hat) - (1 - y) * np.log(1 - y_hat)))
@staticmethod
def grad(y, y_hat, x):
'''The first derivative of the cross entropy loss function to the weight W
'''
return (y_hat - y) * x
class FTRL(object):
def __init__(self, dim, l1, l2, alpha, beta, decisionFunc=LR):
self.dim = dim
self.decisionFunc = decisionFunc
self.z = np.zeros(dim)
self.n = np.zeros(dim)
self.w = np.zeros(dim)
self.w_list = []
self.loss_list = []
self.l1 = l1
self.l2 = l2
self.alpha = alpha
self.beta = beta
def predict(self, x):
return self.decisionFunc.fn(self.w, x)
def update(self, x, y):
self.w = np.array([0 if np.abs(self.z[i]) <= self.l1 else (np.sign(
self.z[i]) * self.l1 - self.z[i]) / (self.l2 + (self.beta + np.sqrt(self.n[i])) / self.alpha) for i in range(self.dim)])
y_hat = self.predict(x)
g = self.decisionFunc.grad(y, y_hat, x)
sigma = (np.sqrt(self.n + g * g) - np.sqrt(self.n)) / self.alpha
self.z += g - sigma * self.w
self.n += g * g
return self.decisionFunc.loss(y, y_hat)
def train(self, trainSet, verbos=False, max_itr=10000000000, eta=0.01, epochs=100):
itr = 0
n = 0
while True:
for x, y in trainSet:
loss = self.update(x, y)
if verbos and n%verbos==0:
print("itr=" + str(n) + "\tloss=" + str(loss))
self.w_list.append(self.w)
self.loss_list.append(loss)
if loss < eta:
itr += 1
else:
itr = 0
if itr >= epochs: # when the loss function has been continuously epochs iterations less than eta
print("loss have less than", eta, " continuously for ", itr, "iterations")
return
n += 1
if n >= max_itr:
print("reach max iteration", max_itr)
return
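
if __name__ == '__main__':
    # Minimal usage sketch on synthetic data; dimensions and hyperparameters are illustrative.
    d = 4
    X = np.random.randn(200, d)
    y = (X[:, 0] - X[:, 1] > 0).astype(float)
    model = FTRL(dim=d, l1=1.0, l2=1.0, alpha=0.1, beta=1.0)
    model.train(list(zip(X, y)), verbos=50, max_itr=2000, eta=0.01, epochs=50)
    print(model.w)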
``` |
{
"source": "JieyuZ2/ASTRA",
"score": 3
} |
#### File: ASTRA/astra/Student.py
```python
import os
from model import LogRegTrainer, BertTrainer, DefaultModelTrainer
preprocessed_dataset_list = ['trec', 'youtube', 'sms', 'census', 'mitr']
supported_trainers = {
'logreg': LogRegTrainer,
'bert': BertTrainer,
}
class Student:
def __init__(self, args, logger=None):
self.args = args
self.logger = logger
self.name = args.student_name
assert self.name in supported_trainers, "Student not supported: <{}>".format(self.name)
self.trainer_class = supported_trainers[self.name]
if args.dataset in preprocessed_dataset_list:
self.trainer = DefaultModelTrainer(args=self.args, logger=self.logger)
else:
self.trainer = self.trainer_class(args=self.args, logger=self.logger)
self.preprocess = self.trainer.preprocess
def train(self, train_dataset, dev_dataset, train_label_name='label', dev_label_name='label'):
# Training student for the first time on few labeled data (First iteration of self-training)
res = self.trainer.train(
train_texts=train_dataset.data['texts'],
preprocessed_train_texts=train_dataset.data.get('preprocessed_texts'),
train_labels=train_dataset.data[train_label_name],
dev_texts=dev_dataset.data['texts'],
preprocessed_dev_texts=dev_dataset.data.get('preprocessed_texts'),
dev_labels=dev_dataset.data[dev_label_name],
)
return res
def train_pseudo(self, train_dataset, dev_dataset, train_label_name='label', train_weight_name='weights',
dev_label_name='label'):
# Fine-tuning student on pseudo-labeled data (provided by the Teacher)
# Call different function for student model with different hyperparameters: weighted training.
# Note: if train_weight_name is None, then weights are not used
res = self.trainer.train_pseudo(
train_texts=train_dataset.data['texts'],
preprocessed_train_texts=train_dataset.data.get('preprocessed_texts'),
train_labels=train_dataset.data[train_label_name],
train_weights=train_dataset.data.get(train_weight_name),
dev_texts=dev_dataset.data['texts'],
preprocessed_dev_texts=dev_dataset.data.get('preprocessed_texts'),
dev_labels=dev_dataset.data[dev_label_name],
)
return res
def finetune(self, train_dataset, dev_dataset, train_label_name='label', dev_label_name='label'):
# Fine-tuning student on few labeled data
# Note: this function is different than train() because of potentially different hyperparameters.
# If all hyperparameters are same, you can merge both train() and finetune() into one.
res = self.trainer.finetune(
train_texts=train_dataset.data['texts'],
preprocessed_train_texts=train_dataset.data.get('preprocessed_texts'),
train_labels=train_dataset.data[train_label_name],
dev_texts=dev_dataset.data['texts'],
preprocessed_dev_texts=dev_dataset.data.get('preprocessed_texts'),
dev_labels=dev_dataset.data[dev_label_name],
)
return res
def predict(self, dataset):
res = self.trainer.predict(
texts=dataset.data['texts'],
preprocessed_texts=dataset.data.get('preprocessed_texts'),
)
assert 'preds' in res and 'proba' in res, "Student Trainer must return 'preds' and 'proba'"
return res
def save(self, name='student'):
savefolder = os.path.join(self.args.logdir, name)
self.logger.info('Saving {} to {}'.format(name, savefolder))
os.makedirs(savefolder, exist_ok=True)
self.trainer.save(savefolder)
def load(self):
savefolder = os.path.join(self.args.logdir, 'student')
if not os.path.exists(savefolder):
            raise BaseException('Pre-trained student folder does not exist: {}'.format(savefolder))
self.trainer.load(savefolder)
``` |
{
"source": "JieyuZ2/meta-weight-net",
"score": 2
} |
#### File: JieyuZ2/meta-weight-net/MW-Net.py
```python
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
from load_corrupted_data import CIFAR10, CIFAR100
# from wideresnet import WideResNet, VNet
from resnet import ResNet32, VNet
# import sklearn.metrics as sm
# import pandas as pd
# import sklearn.metrics as sm
parser = argparse.ArgumentParser(description='PyTorch WideResNet Training')
parser.add_argument('--dataset', default='cifar10', type=str,
help='dataset (cifar10 [default] or cifar100)')
parser.add_argument('--corruption_prob', type=float, default=0.4,
help='label noise')
parser.add_argument('--corruption_type', '-ctype', type=str, default='unif',
help='Type of corruption ("unif" or "flip" or "flip2").')
parser.add_argument('--num_meta', type=int, default=1000)
parser.add_argument('--epochs', default=120, type=int,
help='number of total epochs to run')
parser.add_argument('--iters', default=60000, type=int,
help='number of total iters to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('--batch_size', '--batch-size', default=100, type=int,
help='mini-batch size (default: 100)')
parser.add_argument('--lr', '--learning-rate', default=1e-1, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--layers', default=28, type=int,
help='total number of layers (default: 28)')
parser.add_argument('--widen-factor', default=10, type=int,
help='widen factor (default: 10)')
parser.add_argument('--droprate', default=0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='WideResNet-28-10', type=str,
help='name of experiment')
parser.add_argument('--seed', type=int, default=2)
parser.add_argument('--prefetch', type=int, default=0, help='Pre-fetching threads.')
parser.set_defaults(augment=True)
args = parser.parse_args()
use_cuda = True
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
print()
print(args)
def build_dataset():
normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],
std=[x / 255.0 for x in [63.0, 62.1, 66.7]])
if args.augment:
train_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4, 4, 4, 4), mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
test_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
if args.dataset == 'cifar10':
train_data_meta = CIFAR10(
root='./data', train=True, meta=True, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
corruption_type=args.corruption_type, transform=train_transform, download=True)
train_data = CIFAR10(
root='./data', train=True, meta=False, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed)
test_data = CIFAR10(root='./data', train=False, transform=test_transform, download=True)
elif args.dataset == 'cifar100':
train_data_meta = CIFAR100(
root='./data', train=True, meta=True, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
corruption_type=args.corruption_type, transform=train_transform, download=True)
train_data = CIFAR100(
root='./data', train=True, meta=False, num_meta=args.num_meta, corruption_prob=args.corruption_prob,
corruption_type=args.corruption_type, transform=train_transform, download=True, seed=args.seed)
test_data = CIFAR100(root='./data', train=False, transform=test_transform, download=True)
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
train_meta_loader = torch.utils.data.DataLoader(
train_data_meta, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
return train_loader, train_meta_loader, test_loader
def build_model():
model = ResNet32(args.dataset == 'cifar10' and 10 or 100)
if torch.cuda.is_available():
model.cuda()
torch.backends.cudnn.benchmark = True
return model
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def adjust_learning_rate(optimizer, epochs):
lr = args.lr * ((0.1 ** int(epochs >= 80)) * (0.1 ** int(epochs >= 100))) # For WRN-28-10
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def test(model, test_loader):
model.eval()
correct = 0
test_loss = 0
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(test_loader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
test_loss += F.cross_entropy(outputs, targets).item()
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
accuracy))
return accuracy
def train(train_loader, train_meta_loader, model, vnet, optimizer_model, optimizer_vnet, epoch):
print('\nEpoch: %d' % epoch)
train_loss = 0
meta_loss = 0
train_meta_loader_iter = iter(train_meta_loader)
for batch_idx, (inputs, targets) in enumerate(train_loader):
model.train()
inputs, targets = inputs.to(device), targets.to(device)
meta_model = build_model().cuda()
meta_model.load_state_dict(model.state_dict())
outputs = meta_model(inputs)
cost = F.cross_entropy(outputs, targets, reduce=False)
cost_v = torch.reshape(cost, (len(cost), 1))
v_lambda = vnet(cost_v.data)
l_f_meta = torch.sum(cost_v * v_lambda) / len(cost_v)
meta_model.zero_grad()
grads = torch.autograd.grad(l_f_meta, (meta_model.params()), create_graph=True)
meta_lr = args.lr * ((0.1 ** int(epoch >= 80)) * (0.1 ** int(epoch >= 100))) # For ResNet32
meta_model.update_params(lr_inner=meta_lr, source_params=grads)
del grads
try:
inputs_val, targets_val = next(train_meta_loader_iter)
except StopIteration:
train_meta_loader_iter = iter(train_meta_loader)
inputs_val, targets_val = next(train_meta_loader_iter)
inputs_val, targets_val = inputs_val.to(device), targets_val.to(device)
y_g_hat = meta_model(inputs_val)
l_g_meta = F.cross_entropy(y_g_hat, targets_val)
prec_meta = accuracy(y_g_hat.data, targets_val.data, topk=(1,))[0]
optimizer_vnet.zero_grad()
l_g_meta.backward()
optimizer_vnet.step()
outputs = model(inputs)
cost_w = F.cross_entropy(outputs, targets, reduce=False)
cost_v = torch.reshape(cost_w, (len(cost_w), 1))
prec_train = accuracy(outputs.data, targets.data, topk=(1,))[0]
with torch.no_grad():
w_new = vnet(cost_v)
loss = torch.sum(cost_v * w_new) / len(cost_v)
optimizer_model.zero_grad()
loss.backward()
optimizer_model.step()
train_loss += loss.item()
meta_loss += l_g_meta.item()
if (batch_idx + 1) % 50 == 0:
print('Epoch: [%d/%d]\t'
'Iters: [%d/%d]\t'
'Loss: %.4f\t'
'MetaLoss:%.4f\t'
'Prec@1 %.2f\t'
'Prec_meta@1 %.2f' % (
(epoch + 1), args.epochs, batch_idx + 1, len(train_loader.dataset) / args.batch_size, (train_loss / (batch_idx + 1)),
(meta_loss / (batch_idx + 1)), prec_train, prec_meta))
a = 1
train_loader, train_meta_loader, test_loader = build_dataset()
# create model
model = build_model()
vnet = VNet(1, 100, 1).cuda()
if args.dataset == 'cifar10':
num_classes = 10
if args.dataset == 'cifar100':
num_classes = 100
optimizer_model = torch.optim.SGD(model.params(), args.lr,
momentum=args.momentum, weight_decay=args.weight_decay)
optimizer_vnet = torch.optim.Adam(vnet.params(), 1e-3,
weight_decay=1e-4)
def main():
best_acc = 0
for epoch in range(args.epochs):
adjust_learning_rate(optimizer_model, epoch)
train(train_loader, train_meta_loader, model, vnet, optimizer_model, optimizer_vnet, epoch)
test_acc = test(model=model, test_loader=test_loader)
if test_acc >= best_acc:
best_acc = test_acc
print(f'test acc @ {epoch}: {test_acc}')
print('best accuracy:', best_acc)
if __name__ == '__main__':
main()
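# A sketch of the bilevel problem that train() approximates (standard MW-Net formulation;
# the notation below is an informal reading of the code, not taken verbatim from the paper):
#   inner step:  theta'(w) = theta - lr * d/d theta [ (1/n) sum_i V(L_i(theta); w) * L_i(theta) ]
#   outer step:  w <- w - lr_v * d/d w [ (1/m) sum_j L_j^meta(theta'(w)) ]
# where V(.; w) is the weighting network (VNet), L_i are per-sample losses on the possibly
# noisy training batch, and L_j^meta are losses on the small clean meta set. The code realizes
# theta'(w) with a single gradient step on a copied meta_model, updates VNet through the meta
# loss, and finally updates the actual model using the detached weights w_new.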
``` |
{
"source": "JieZhang0822/pyang",
"score": 3
} |
#### File: pyang/pyang/error.py
```python
import copy
import os.path
### struct to keep track of position for error messages
class Position(object):
__slots__ = (
'ref',
'line',
'top',
'uses_pos',
)
def __init__(self, ref):
self.ref = ref
self.line = 0
self.top = None
self.uses_pos = None
def __str__(self):
return self.label()
def label(self, basename=False):
ref = self.ref
if basename:
ref = os.path.basename(ref)
s = ref + ':' + str(self.line)
if self.uses_pos is None:
return s
else:
return str(self.uses_pos) + ' (at ' + s + ')'
### Exceptions
class Abort(Exception):
"""used for non-recoverable errors to abort parsing"""
pass
class Eof(Exception):
"""raised by tokenizer when end of file is detected"""
pass
class TransformError(Exception):
"""raised by plugins to fail the transform() function"""
def __init__(self, msg="", exit_code=1):
self.msg = msg
self.exit_code = exit_code
class EmitError(Exception):
"""raised by plugins to fail the emit() function"""
def __init__(self, msg="", exit_code=1):
self.msg = msg
self.exit_code = exit_code
### error codes
## level:
## 1: critical error, can not be made into a warning
## 2: major error, can not be made into a warning
## 3: minor error, can be made into warning with -W
## 4: warning
error_codes = \
{
'READ_ERROR':
(1,
'read error: %s'),
'EOF_ERROR':
(1,
'premature end of file'),
'EXPECTED_QUOTED_STRING':
(1,
'expected quoted string after \'+\' operator'),
'UNKNOWN_KEYWORD':
(1,
'unknown keyword "%s"'),
'INCOMPLETE_STATEMENT':
(1,
'unterminated statement definition for keyword "%s", looking at %s'),
'EXPECTED_KEYWORD':
(1,
'expected keyword "%s"'),
'EXPECTED_KEYWORD_2':
(1,
'expected keyword "%s" as child to "%s"'),
'EXPECTED_DATA_DEF':
(1,
'expected a data definition statement as child to "%s"'),
'UNEXPECTED_KEYWORD':
(1,
'unexpected keyword "%s"'),
'UNEXPECTED_KEYWORD_1':
(1,
'unexpected keyword "%s", expected "%s"'),
'UNEXPECTED_KEYWORD_N':
(1,
'unexpected keyword "%s", expected one of %s'),
'UNEXPECTED_KEYWORD_CANONICAL':
(1,
'keyword "%s" not in canonical order (see RFC 6020, Section 12)'),
'UNEXPECTED_KEYWORD_CANONICAL_1':
(1,
'keyword "%s" not in canonical order, ' \
'expected "%s" (see RFC 6020, Section 12)'),
'UNEXPECTED_KEYWORD_USES':
(1,
'unexpected keyword "%s" under "%s", defined at %s'),
'UNEXPECTED_KEYWORD_AUGMENT':
(1,
'unexpected keyword "%s" under "%s", defined at %s'),
'EXPECTED_ARGUMENT':
(1,
'expected an argument for keyword "%s"'),
'UNEXPECTED_ARGUMENT':
(1,
'did not expect an argument, got "%s"'),
'XML_IDENTIFIER':
(3,
'illegal identifier "%s", must not start with [xX][mM][lL] in' \
' YANG version 1 (see RFC 6020, Section 12)'),
'TRAILING_GARBAGE':
(2,
'trailing garbage after module'),
'BAD_VALUE':
(1,
'bad value "%s" (should be %s)'),
'CIRCULAR_DEPENDENCY':
(1,
'circular dependency for %s "%s"'),
'MODULE_NOT_FOUND':
(1,
'module "%s" not found in search path'),
'MODULE_NOT_FOUND_REV':
(1,
'module "%s" revision "%s" not found in search path'),
'MODULE_NOT_IMPORTED':
(1,
'no module with the namespace "%s" is imported'),
'BAD_IMPORT':
(1,
'cannot import %s "%s", must be a module'),
'BAD_IMPORT_YANG_VERSION':
(1,
'a version %s module cannot import a version %s module by revision'),
'BAD_INCLUDE':
(1,
'cannot include %s "%s", must be a submodule'),
'BAD_INCLUDE_YANG_VERSION':
(1,
'cannot include a version %s submodule in a version %s module'),
'BAD_MODULE_NAME':
(2,
'unexpected modulename "%s" in %s, should be "%s"'),
'WBAD_MODULE_NAME':
(4,
'unexpected modulename "%s" in %s, should be "%s"'),
'FILENAME_BAD_MODULE_NAME':
(4,
'filename "%s" suggests invalid module name "%s", should match "%s"'),
'BAD_REVISION':
(3,
'unexpected latest revision "%s" in %s, should be "%s"'),
'WBAD_REVISION':
(4,
'unexpected latest revision "%s" in %s, should be "%s"'),
'FILENAME_BAD_REVISION':
(4,
'filename "%s" suggests invalid revision "%s", should match "%s"'),
'BAD_SUB_BELONGS_TO':
(1,
'module "%s" includes "%s", but "%s" does not specify a ' \
'correct belongs-to'),
'MISSING_INCLUDE':
(1,
'submodule %s is included by %s, but not by the module %s'),
'PREFIX_ALREADY_USED':
(1,
'prefix "%s" already used for module %s'),
'PREFIX_NOT_DEFINED':
(1,
'prefix "%s" is not defined (reported only once)'),
'WPREFIX_NOT_DEFINED':
(4,
'"%s" looks like a prefix but is not defined'),
'NODE_NOT_FOUND':
(1,
'node %s::%s is not found'),
'BAD_NODE_IN_AUGMENT':
(1,
'node %s::%s of type %s cannot be augmented'),
'BAD_TARGET_NODE':
(1,
'node %s::%s of type %s cannot be target node'),
'BAD_NODE_IN_REFINE':
(1,
'node %s::%s cannot be refined'),
'BAD_REFINEMENT':
(1,
'"%s" node "%s::%s" cannot be refined with "%s"'),
'BAD_DEVIATE_KEY':
(2,
'key node "%s::%s" cannot be deviated with "not-supported"'),
'BAD_DEVIATE_ADD':
(2,
'the "%s" property already exists in node "%s::%s"'),
'BAD_DEVIATE_REP':
(2,
'the "%s" property does not exist in node "%s::%s"'),
'BAD_DEVIATE_DEL':
(2,
'the "%s" property does not exist in node "%s::%s"'),
'BAD_DEVIATE_DEL2':
(2,
'the "%s" property connot be deviate deleted in node "%s::%s"'),
'BAD_DEVIATE_TYPE':
(2,
'the "%s" property cannot be added'),
'BAD_DEVIATE_WITH_NOT_SUPPORTED':
(2,
'cannot have other deviate statement together with "not-supported"'),
'EXTENSION_NOT_DEFINED':
(1,
'extension "%s" is not defined in module %s'),
'TYPE_NOT_FOUND':
(1,
'type "%s" not found in module "%s"'),
'FEATURE_NOT_FOUND':
(1,
'feature "%s" not found in module "%s"'),
'IDENTITY_NOT_FOUND':
(1,
'identity "%s" not found in module "%s"'),
'GROUPING_NOT_FOUND':
(1,
'grouping "%s" not found in module "%s"'),
'DEFAULT_CASE_NOT_FOUND':
(1,
'the default case "%s" is not found"'),
'MANDATORY_NODE_IN_DEFAULT_CASE':
(1,
'mandatory node in default case'),
'MULTIPLE_REFINE':
(1,
'the node "%s" is already refined at %s'),
'RANGE_BOUNDS':
(2,
'range error: "%s" is not larger than "%s"'),
'LENGTH_BOUNDS':
(2,
'length error: "%s" is not larger than "%s"'),
'TYPE_VALUE':
(2,
'the value "%s" does not match its base type %s- %s'),
'DUPLICATE_ENUM_NAME':
(1,
'the enum name "%s" has already been used for the ' \
'enumeration at %s'),
'DUPLICATE_ENUM_VALUE':
(1,
'the integer value "%d" has already been used for the ' \
'enumeration at %s'),
'ENUM_VALUE':
(1,
'the enumeration value "%s" is not an 32 bit integer'),
'BAD_ENUM_VALUE':
(1,
'the given value "%s" does not match the base enum value "%d"'),
'DUPLICATE_BIT_POSITION':
(1,
'the position "%d" has already been used for the bit at %s'),
'BIT_POSITION':
(1,
'the position value "%s" is not valid'),
'BAD_BIT_POSITION':
(1,
'the given position "%s" does not match the base bit position "%d"'),
'NEED_KEY':
(1,
'the list needs at least one key'),
'NEED_KEY_USES':
(1,
'the list at "%s" needs at least one key because it is used as config'),
'KEY_BAD_CONFIG':
(1,
'the key "%s" does not have same "config" as its list'),
'BAD_KEY':
(1,
'the key "%s" does not reference an existing leaf'),
'BAD_UNIQUE':
(1,
'the unique argument "%s" does not reference an existing leaf'),
'BAD_UNIQUE_PART':
(1,
'the identifier "%s" in the unique argument does not reference '
'an existing container'),
'BAD_UNIQUE_PART_LIST':
(1,
'the identifier "%s" in the unique argument references a list; '
'this is not legal'),
'BAD_UNIQUE_CONFIG':
(1,
'the identifer "%s" has not the same config property as the'
' other nodes in the unique expression'),
'ILLEGAL_ESCAPE':
(1,
'the escape sequence "\\%s" is illegal in double quoted strings'),
'ILLEGAL_ESCAPE_WARN':
(4,
'the escape sequence "\\%s" is unsafe in double quoted strings' \
' - pass the flag --lax-quote-checks to avoid this warning'),
'UNIQUE_IS_KEY':
(4,
'all keys in the list are redundantly present in the unique statement'),
'DUPLICATE_KEY':
(2,
'the key "%s" must not be listed more than once'),
'DUPLICATE_UNIQUE':
(3,
'the leaf "%s" occurs more than once in the unique expression'),
'PATTERN_ERROR':
(2,
'syntax error in pattern: %s'),
'LEAFREF_TOO_MANY_UP':
(1,
'the path for %s at %s has too many ".."'),
'LEAFREF_IDENTIFIER_NOT_FOUND':
(1,
'"%s:%s" in the path for %s at %s is not found'),
'LEAFREF_IDENTIFIER_BAD_NODE':
(1,
'"%s:%s" in the path for %s at %s references a %s node'),
'LEAFREF_BAD_PREDICATE':
(1,
'"%s:%s" in the path for %s at %s has a predicate, '
'but is not a list'),
'LEAFREF_BAD_PREDICATE_PTR':
(1,
'"%s:%s" in the path\'s predicate for %s at %s is compared '
'with a node that is not a leaf'),
'LEAFREF_NOT_LEAF':
(1,
'the path for %s at %s does not refer to a leaf'),
'LEAFREF_NO_KEY':
(1,
'"%s:%s" in the path for %s at %s is not the name of a key leaf'),
'LEAFREF_MULTIPLE_KEYS':
(1,
'"%s:%s" in the path for %s at %s is referenced more than once'),
'LEAFREF_BAD_CONFIG':
(1,
'the path for %s is config but refers to a '
'non-config leaf "%s" defined at %s'),
'LEAFREF_DEREF_NOT_LEAFREF':
(1,
'the deref argument refers to node "%s" at %s which is'
' not a leafref leaf'),
'LEAFREF_DEREF_NOT_KEY':
(1,
'the deref argument refers to node "%s" at %s which'
' does not refer to a key (%s at %s)'),
'LEAFREF_TO_NOT_IMPLEMENTED':
(1,
'the leafref refer to a node that is not implemented'),
'DUPLICATE_CHILD_NAME':
(1,
'there is already a child node to "%s" at %s with the name "%s" '
'defined at %s'),
'BAD_ANCESTOR':
(1,
'"%s" node cannot have an ancestor list node without a key'),
'BAD_ANCESTOR2':
(1,
'"%s" node cannot have an ancestor "%s" node'),
'BAD_TYPE_NAME':
(1,
'illegal type name "%s"'),
'TYPE_ALREADY_DEFINED':
(1,
'type name "%s" is already defined at %s'),
'GROUPING_ALREADY_DEFINED':
(1,
'grouping name "%s" is already defined at %s'),
'FEATURE_ALREADY_DEFINED':
(1,
'feature name "%s" is already defined at %s'),
'IDENTITY_ALREADY_DEFINED':
(1,
'identity name "%s" is already defined at %s'),
'EXTENSION_ALREADY_DEFINED':
(1,
'extension name "%s" is already defined at %s'),
'BAD_RESTRICTION':
(1,
'restriction "%s" not allowed for this base type'),
'BAD_DEFAULT_VALUE':
(1,
'the type "%s" cannot have a default value'),
'MISSING_TYPE_SPEC':
(1,
'a type "%s" must have at least one "%s" statement'),
'MISSING_TYPE_SPEC_1':
(1,
'a type "%s" must have a "%s" statement'),
'BAD_TYPE_IN_UNION':
(1,
'the type "%s" (defined at %s) cannot be part of a union'),
'BAD_TYPE_IN_KEY':
(1,
'the type "%s" cannot be part of a key, used by leaf "%s"'),
'KEY_BAD_SUBSTMT':
(1,
'the statement "%s" cannot be given for a key'),
'DEFAULT_AND_IFFEATURE':
(1,
'a \'default\' value cannot be given in leaf node when'
' \'if-feature\' is existing'),
'DEFAULT_AND_MANDATORY':
(1,
'a \'default\' value cannot be given when \'mandatory\' is "true"'),
'DEFAULT_AND_MIN_ELEMENTS':
(1,
'a \'default\' value cannot be given when \'min-elements\' is'
' greater than 0'),
'MAX_ELEMENTS_AND_MIN_ELEMENTS':
(1,
'a \'min-elements\' value cannot be greater than \'max-elements\' value'),
'DUPLICATE_DEFAULT':
(1,
'the default value "%s" is given twice in the leaf list'),
'BAD_STATUS_REFERENCE':
(2,
'the "%s" definition is %s, but the "%s" it references is %s'),
'REVISION_ORDER':
(4,
'the revision statements are not given in reverse chronological order'),
'EXTENSION_ARGUMENT_PRESENT':
(1,
'unexpected argument for extension "%s"'),
'EXTENSION_NO_ARGUMENT_PRESENT':
(1,
'expected argument for extension "%s"'),
'SYNTAX_ERROR':
(1,
'syntax error: %s'),
'DUPLICATE_NAMESPACE':
(1,
'duplicate namespace uri "%s" found in modules "%s"'),
'MISSING_ARGUMENT_ATTRIBUTE':
(1,
'missing argument attribute "%s" for "%s"'),
'MISSING_ARGUMENT_ELEMENT':
(1,
'missing argument element "%s" for "%s"'),
'UNEXPECTED_ATTRIBUTE':
(1,
'unexpected attribute %s'),
'INVALID_CONFIG':
(2,
'config true cannot be set when the parent is config false'),
'XPATH_SYNTAX_ERROR':
(2,
'XPath syntax error: %s'),
'XPATH_VARIABLE':
(2,
'XPath variable "%s" is not defined in the XPath context'),
'XPATH_FUNCTION':
(2,
'XPath function "%s" is not defined in the XPath context'),
'XPATH_FUNC_ARGS':
(2,
'XPath function "%s" takes %s arguments but called with %s.'),
'XPATH_NODE_NOT_FOUND1':
(4,
'node "%s::%s" is not found in "%s::%s"'),
'XPATH_NODE_NOT_FOUND2':
(4,
'node "%s::%s" is not found in module "%s"'),
'XPATH_REF_CONFIG_FALSE':
(4,
'node "%s::%s" is config false and is not part of the accessible tree'),
'XPATH_PATH_TOO_MANY_UP':
(2,
'the path has too many ".."'),
# 'XPATH_FUNCTION_RET_VAL':
# (2,
# 'XPath function "%s" does not return a %s'),
'AUGMENT_MANDATORY':
(1,
'cannot augment with mandatory node "%s"'),
'LONG_IDENTIFIER':
(3,
'identifier "%s" exceeds %s characters'),
'CONFIG_IGNORED':
(4,
'explicit config statement is ignored'),
'UNUSED_IMPORT':
(4,
'imported module "%s" not used'),
'UNUSED_TYPEDEF':
(4,
'locally scoped typedef "%s" not used'),
'UNUSED_GROUPING':
(4,
'locally scoped grouping "%s" not used'),
'KEY_HAS_DEFAULT':
(4,
'default value for a key leaf is ignored'),
'KEY_HAS_MANDATORY_FALSE':
(4,
'"mandatory" statement for a key leaf is ignored'),
'LONG_LINE':
(4,
'line length %s exceeds %s characters'),
'STRICT_XPATH_FUNCTION':
(2,
'XPath function "%s" is not allowed for strict YANG compliance'),
}
def add_error_code(tag, level, fmt):
"""Add an error code to the framework.
Can be used by plugins to add special errors."""
error_codes[tag] = (level, fmt)
def err_level(tag):
try:
(level, fmt) = error_codes[tag]
return level
except KeyError:
return 0
def err_to_str(tag, args):
try:
(level, fmt) = error_codes[tag]
return fmt % args
except KeyError:
return 'unknown error %s' % tag
def err_add(errors, pos, tag, args):
error = (copy.copy(pos), tag, args)
# surely this can be done more elegant??
for p, t, a in errors:
if (p.line == pos.line and p.ref == pos.ref and
p.top == pos.top and t == tag and a == args):
return
errors.append(error)
def is_warning(level):
return not is_error(level)
def is_error(level):
return level < 4
def allow_warning(level):
return level > 2
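# --- Hypothetical usage sketch (not part of pyang; for illustration only) ---
if __name__ == '__main__':
    # Plugins can register their own error codes and report them against a Position.
    add_error_code('MY_PLUGIN_ERROR', 4, 'something looks odd in "%s"')
    errors = []
    pos = Position('example.yang')
    pos.line = 42
    err_add(errors, pos, 'MY_PLUGIN_ERROR', ('some-node',))
    for p, tag, args in errors:
        level = err_level(tag)
        kind = 'warning' if is_warning(level) else 'error'
        print('%s: %s: %s' % (p.label(), kind, err_to_str(tag, args)))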
``` |
{
"source": "jiezhangxl/PointCNN-FI-Conv",
"score": 2
} |
#### File: jiezhangxl/PointCNN-FI-Conv/pointcnn.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import pointfly as pf
import tensorflow as tf
def ficonv(pts, fts, qrs, tag, N, K1, mm, sigma, scale, K, D, P, C, C_pts_fts, kernel_num, is_training, with_kernel_registering, with_kernel_shape_comparison,
with_point_transformation, with_feature_transformation, with_learning_feature_transformation, kenel_initialization_method, depth_multiplier, sorting_method=None, with_global=False):
Dis, indices_dilated = pf.knn_indices_general(qrs, pts, K*D, True)
indices = indices_dilated[:, :, ::D, :]
if sorting_method is not None:
indices = pf.sort_points(pts, indices, sorting_method)
nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts') # (N, P, K, 3)
nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center') # (N, P, 1, 3)
nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag+'nn_pts_local') # (N, P, K, 3)
if with_point_transformation or with_feature_transformation:
X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
if with_point_transformation:
if with_learning_feature_transformation:
nn_pts_local = tf.matmul(X_2_KK, nn_pts_local)
# Prepare features to be transformed
nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
else:
# Prepare features to be transformed
nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
nn_pts_local = tf.matmul(X_2_KK, nn_pts_local)
else:
if with_learning_feature_transformation:
nn_pts_local_ = tf.matmul(X_2_KK, nn_pts_local, name=tag+'nn_pts_local_')
# Prepare features to be transformed
nn_fts_from_pts_0 = pf.dense(nn_pts_local_, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
else:
nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
if fts is None:
nn_fts_input = nn_fts_from_pts
else:
nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1, name=tag + 'nn_fts_input')
P1 = tf.shape(nn_pts_local)[1]
dim1 = 3
if with_kernel_registering:
######################## preparing #########################
if with_feature_transformation:
nn_fts_input = tf.matmul(X_2_KK, nn_fts_input)
r_data = tf.reduce_sum(nn_pts_local * nn_pts_local, axis=3, keep_dims=True, name=tag+'kernel_pow')
######################## kernel-registering #########################
shape_id = 0
if kenel_initialization_method == 'random':
kernel_shape=tf.Variable(tf.random_uniform([K1,dim1], minval=-0.5, maxval=0.5, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
else:
kernel_shape=tf.Variable(tf.random_normal([K1,dim1], mean=0.0, stddev=1.0, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
kernel_shape_dis = tf.sqrt(tf.reduce_sum(kernel_shape * kernel_shape, axis=1), name=tag+'kernel_shape_dis'+str(shape_id))
kernel_shape_normal = scale * tf.div(kernel_shape,tf.reduce_max(kernel_shape_dis), name=tag+'kernel_shape_normal'+str(shape_id))
r_kernel = tf.reduce_sum(kernel_shape_normal * kernel_shape_normal, axis=1, keep_dims=True, name=tag+'kernel_pow'+str(shape_id))
reshape_data = tf.reshape(nn_pts_local, [N*P1*K,dim1], name=tag+'reshape_kernel'+str(shape_id))
m = tf.reshape( tf.matmul(reshape_data, tf.transpose(kernel_shape_normal)), [N, P1, K, K1], name=tag+'mm'+str(shape_id))
dis_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'dis_matrix'+str(shape_id))
coef_matrix = tf.exp(tf.div(-dis_matrix,sigma), name=tag+'coef_matrix'+str(shape_id))
#coef_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'coef_matrix'+str(shape_id))
if with_kernel_shape_comparison:
coef_global = tf.reduce_sum(coef_matrix, axis=[2,3], keep_dims=True)/K
coef_normal = coef_global * tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
else:
coef_normal = tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
fts_X = tf.matmul(coef_normal, nn_fts_input, name=tag+'fts_X'+str(shape_id))
###################################################################
fts_conv = pf.separable_conv2d(fts_X, math.ceil(mm*C/kernel_num), tag+'fts_conv'+str(shape_id), is_training, (1, K1), depth_multiplier=depth_multiplier)
fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag+'fts_conv_3d'+str(shape_id))
for shape_id in range(kernel_num - 1):
shape_id = shape_id + 1
if kenel_initialization_method == 'random':
kernel_shape=tf.Variable(tf.random_uniform([K1,dim1], minval=-0.5, maxval=0.5, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
else:
kernel_shape=tf.Variable(tf.random_normal([K1,dim1], mean=0.0, stddev=1.0, dtype=tf.float32), name=tag+'kernel_shape'+str(shape_id))
kernel_shape_dis = tf.sqrt(tf.reduce_sum(kernel_shape * kernel_shape, axis=1), name=tag+'kernel_shape_dis'+str(shape_id))
kernel_shape_normal = scale * tf.div(kernel_shape,tf.reduce_max(kernel_shape_dis), name=tag+'kernel_shape_normal'+str(shape_id))
r_kernel = tf.reduce_sum(kernel_shape_normal * kernel_shape_normal, axis=1, keep_dims=True, name=tag+'kernel_pow'+str(shape_id))
reshape_data = tf.reshape(nn_pts_local, [N*P1*K,dim1], name=tag+'reshape_kernel'+str(shape_id))
m = tf.reshape( tf.matmul(reshape_data, tf.transpose(kernel_shape_normal)), [N, P1, K, K1], name=tag+'mm'+str(shape_id))
dis_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'dis_matrix'+str(shape_id))
coef_matrix = tf.exp(tf.div(-dis_matrix,sigma), name=tag+'coef_matrix'+str(shape_id))
#coef_matrix = tf.transpose(r_data-2*m+tf.transpose(r_kernel),perm=[0,1,3,2],name=tag+'coef_matrix'+str(shape_id))
if with_kernel_shape_comparison:
coef_global = tf.reduce_sum(coef_matrix, axis=[2,3], keep_dims=True)/K
coef_normal = coef_global * tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
else:
coef_normal = tf.div(coef_matrix,tf.reduce_sum(coef_matrix , axis = 3 , keep_dims=True), name=tag+'coef_normal'+str(shape_id))
fts_X = tf.matmul(coef_normal, nn_fts_input, name=tag+'fts_X'+str(shape_id))
###################################################################
fts_conv = pf.separable_conv2d(fts_X, math.ceil(mm*C/kernel_num), tag+'fts_conv'+str(shape_id), is_training, (1, K1), depth_multiplier=depth_multiplier)
fts_conv_3d = tf.concat([fts_conv_3d, tf.squeeze(fts_conv, axis=2)], axis = -1 , name=tag+'fts_conv_3d'+str(shape_id))
else:
fts_X = nn_fts_input
fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K), depth_multiplier=depth_multiplier)
fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')
if with_global:
fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
else:
return fts_conv_3d
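# Note on the kernel-registering step in ficonv above (an informal reading of the code):
# for every query point, each learned kernel point q_j is compared with every neighbour p_k via
#   coef[j, k] = exp(-||p_k - q_j||^2 / sigma)
# and the coefficients are normalized over the K neighbours, so each kernel point aggregates a
# convex combination of neighbour features; with_kernel_shape_comparison additionally rescales
# those weights by the average unnormalized response of the whole neighbourhood.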
def xdeconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, with_X_transformation, depth_multiplier,
sorting_method=None, with_global=False):
_, indices_dilated = pf.knn_indices_general(qrs, pts, K * D, True)
indices = indices_dilated[:, :, ::D, :]
if sorting_method is not None:
indices = pf.sort_points(pts, indices, sorting_method)
nn_pts = tf.gather_nd(pts, indices, name=tag + 'nn_pts') # (N, P, K, 3)
nn_pts_center = tf.expand_dims(qrs, axis=2, name=tag + 'nn_pts_center') # (N, P, 1, 3)
nn_pts_local = tf.subtract(nn_pts, nn_pts_center, name=tag + 'nn_pts_local') # (N, P, K, 3)
# Prepare features to be transformed
nn_fts_from_pts_0 = pf.dense(nn_pts_local, C_pts_fts, tag + 'nn_fts_from_pts_0', is_training)
nn_fts_from_pts = pf.dense(nn_fts_from_pts_0, C_pts_fts, tag + 'nn_fts_from_pts', is_training)
if fts is None:
nn_fts_input = nn_fts_from_pts
else:
nn_fts_from_prev = tf.gather_nd(fts, indices, name=tag + 'nn_fts_from_prev')
nn_fts_input = tf.concat([nn_fts_from_pts, nn_fts_from_prev], axis=-1, name=tag + 'nn_fts_input')
if with_X_transformation:
######################## X-transformation #########################
X_0 = pf.conv2d(nn_pts_local, K * K, tag + 'X_0', is_training, (1, K))
X_0_KK = tf.reshape(X_0, (N, P, K, K), name=tag + 'X_0_KK')
X_1 = pf.depthwise_conv2d(X_0_KK, K, tag + 'X_1', is_training, (1, K))
X_1_KK = tf.reshape(X_1, (N, P, K, K), name=tag + 'X_1_KK')
X_2 = pf.depthwise_conv2d(X_1_KK, K, tag + 'X_2', is_training, (1, K), activation=None)
X_2_KK = tf.reshape(X_2, (N, P, K, K), name=tag + 'X_2_KK')
fts_X = tf.matmul(X_2_KK, nn_fts_input, name=tag + 'fts_X')
###################################################################
else:
fts_X = nn_fts_input
fts_conv = pf.separable_conv2d(fts_X, C, tag + 'fts_conv', is_training, (1, K), depth_multiplier=depth_multiplier)
fts_conv_3d = tf.squeeze(fts_conv, axis=2, name=tag + 'fts_conv_3d')
if with_global:
fts_global_0 = pf.dense(qrs, C // 4, tag + 'fts_global_0', is_training)
fts_global = pf.dense(fts_global_0, C // 4, tag + 'fts_global', is_training)
return tf.concat([fts_global, fts_conv_3d], axis=-1, name=tag + 'fts_conv_3d_with_global')
else:
return fts_conv_3d
class PointCNN:
def __init__(self, points, features, is_training, setting):
xconv_params = setting.xconv_params
fc_params = setting.fc_params
with_X_transformation = setting.with_X_transformation
with_kernel_registering = setting.with_kernel_registering
with_kernel_shape_comparison = setting.with_kernel_shape_comparison
with_point_transformation = setting.with_point_transformation
with_feature_transformation = setting.with_feature_transformation
with_learning_feature_transformation = setting.with_learning_feature_transformation
kenel_initialization_method = setting.kenel_initialization_method
sorting_method = setting.sorting_method
N = tf.shape(points)[0]
kernel_num = setting.kernel_num
if setting.sampling == 'fps':
from sampling import tf_sampling
self.layer_pts = [points]
if features is None:
self.layer_fts = [features]
else:
features = tf.reshape(features, (N, -1, setting.data_dim - 3), name='features_reshape')
C_fts = xconv_params[0]['C'] // 2
features_hd = pf.dense(features, C_fts, 'features_hd', is_training)
self.layer_fts = [features_hd]
# self.Dis = []
# self.nn_pts_local = []
for layer_idx, layer_param in enumerate(xconv_params):
tag = 'xconv_' + str(layer_idx + 1) + '_'
K1 = layer_param['K1']
mm = layer_param['mm']
sigma = layer_param['sigma']
scale = layer_param['scale']
K = layer_param['K']
D = layer_param['D']
P = layer_param['P']
C = layer_param['C']
links = layer_param['links']
if setting.sampling != 'random' and links:
print('Error: flexible links are supported only when random sampling is used!')
exit()
# get k-nearest points
pts = self.layer_pts[-1]
fts = self.layer_fts[-1]
if P == -1 or (layer_idx > 0 and P == xconv_params[layer_idx - 1]['P']):
qrs = self.layer_pts[-1]
else:
if setting.sampling == 'fps':
fps_indices = tf_sampling.farthest_point_sample(P, pts)
batch_indices = tf.tile(tf.reshape(tf.range(N), (-1, 1, 1)), (1, P, 1))
indices = tf.concat([batch_indices, tf.expand_dims(fps_indices,-1)], axis=-1)
qrs = tf.gather_nd(pts, indices, name= tag + 'qrs') # (N, P, 3)
elif setting.sampling == 'ids':
indices = pf.inverse_density_sampling(pts, K, P)
qrs = tf.gather_nd(pts, indices)
elif setting.sampling == 'random':
qrs = tf.slice(pts, (0, 0, 0), (-1, P, -1), name=tag + 'qrs') # (N, P, 3)
else:
print('Unknown sampling method!')
exit()
self.layer_pts.append(qrs)
if layer_idx == 0:
C_pts_fts = C // 2 if fts is None else C // 4
depth_multiplier = 4
else:
C_prev = xconv_params[layer_idx - 1]['C']
C_pts_fts = C_prev // 4
depth_multiplier = math.ceil(C / C_prev)
with_global = (setting.with_global and layer_idx == len(xconv_params) - 1)
fts_xconv= ficonv(pts, fts, qrs, tag, N, K1, mm, sigma, scale, K, D, P, C, C_pts_fts, kernel_num, is_training, with_kernel_registering, with_kernel_shape_comparison,
with_point_transformation, with_feature_transformation, with_learning_feature_transformation, kenel_initialization_method, depth_multiplier, sorting_method, with_global)
#self.Dis.append(Dis_)
#self.nn_pts_local.append(nn_pts_local_)
fts_list = []
for link in links:
fts_from_link = self.layer_fts[link]
if fts_from_link is not None:
fts_slice = tf.slice(fts_from_link, (0, 0, 0), (-1, P, -1), name=tag + 'fts_slice_' + str(-link))
fts_list.append(fts_slice)
if fts_list:
fts_list.append(fts_xconv)
self.layer_fts.append(tf.concat(fts_list, axis=-1, name=tag + 'fts_list_concat'))
else:
self.layer_fts.append(fts_xconv)
if hasattr(setting, 'xdconv_params'):
for layer_idx, layer_param in enumerate(setting.xdconv_params):
tag = 'xdconv_' + str(layer_idx + 1) + '_'
K = layer_param['K']
D = layer_param['D']
pts_layer_idx = layer_param['pts_layer_idx']
qrs_layer_idx = layer_param['qrs_layer_idx']
pts = self.layer_pts[pts_layer_idx + 1]
fts = self.layer_fts[pts_layer_idx + 1] if layer_idx == 0 else self.layer_fts[-1]
qrs = self.layer_pts[qrs_layer_idx + 1]
fts_qrs = self.layer_fts[qrs_layer_idx + 1]
P = xconv_params[qrs_layer_idx]['P']
C = xconv_params[qrs_layer_idx]['C']
C_prev = xconv_params[pts_layer_idx]['C']
C_pts_fts = C_prev // 4
depth_multiplier = 1
fts_xdconv = xdeconv(pts, fts, qrs, tag, N, K, D, P, C, C_pts_fts, is_training, with_X_transformation,
depth_multiplier, sorting_method)
fts_concat = tf.concat([fts_xdconv, fts_qrs], axis=-1, name=tag + 'fts_concat')
fts_fuse = pf.dense(fts_concat, C, tag + 'fts_fuse', is_training)
self.layer_pts.append(qrs)
self.layer_fts.append(fts_fuse)
self.fc_layers = [self.layer_fts[-1]]
for layer_idx, layer_param in enumerate(fc_params):
C = layer_param['C']
dropout_rate = layer_param['dropout_rate']
fc = pf.dense(self.fc_layers[-1], C, 'fc{:d}'.format(layer_idx), is_training)
fc_drop = tf.layers.dropout(fc, dropout_rate, training=is_training, name='fc{:d}_drop'.format(layer_idx))
self.fc_layers.append(fc_drop)
``` |
{
"source": "JieZheng-ShanghaiTech/HiCoEx",
"score": 3
} |
#### File: src/data_preprocessing/01_gene_expression.py
```python
import argparse
import os
import pandas as pd
def main(args):
dataset_path = '../../data/{}'.format(args.dataset)
if not os.path.exists(dataset_path + '/expression_raw.csv'):
tcga = pd.read_csv(args.input, delimiter='\t')
print('Gene expression data loaded:', tcga.shape[0], 'genes and', tcga.shape[1] - 1, 'samples')
low_expression_genes = (tcga == 0).sum(axis=1) <= tcga.shape[1] * 0.2
        print(tcga.shape[0] - low_expression_genes.sum(), 'genes out of', tcga.shape[0],
              'have more than 20% of samples with 0 expression. Removed')
tcga = tcga[low_expression_genes]
gene_info = pd.read_csv(args.gene_info, delimiter='\t')
print('Merging gene expression data with gene information from Ensembl hg19')
tcga = gene_info.merge(tcga, right_on='sample', left_on='Gene name', )
tcga = tcga.drop('sample', axis=1)
print('Removing duplicated gene entries and keeping the ones with the outermost TSS')
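        # Assumption behind the min/max below: for genes on the + strand the smallest
        # coordinate is the most upstream (outermost) TSS, while for genes on the - strand
        # it is the largest coordinate, so duplicate entries are collapsed accordingly.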
tcga_pos = tcga[tcga['Strand'] == 1]
tcga_neg = tcga[tcga['Strand'] == -1]
tcga_pos = tcga_pos.groupby(['Gene name']).min()
tcga_neg = tcga_neg.groupby(['Gene name']).max()
tcga = pd.concat([tcga_neg, tcga_pos])
tcga = tcga.groupby(['Gene name']).max()
print('Final gene expression data with', tcga.shape[0], 'genes')
if not os.path.exists(dataset_path):
os.mkdir(dataset_path)
print('Saving in', dataset_path + '/expression_raw.csv')
tcga.to_csv(dataset_path + '/expression_raw.csv')
else:
print('Expression already computed. Skipped.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True,
help='Gene expression input file path downloaded from Xena Browser')
parser.add_argument('--dataset', type=str, default='breast_normal',
help='Name that will be used to identify the dataset')
parser.add_argument('--gene-info', type=str, default='../../data/GRCh37_p13_gene_info.txt',
help='Path of the txt file containing the association gene name - TSS')
args = parser.parse_args()
main(args)
```
#### File: src/data_preprocessing/02_hic_juicer.py
```python
import argparse
import os
import numpy as np
import pandas as pd
import scipy.sparse as sps
import matplotlib.pyplot as plt
def main(args):
print('Loading using juicer')
chromosomes = range(1, 23) if args.chromosomes is None else args.chromosomes
dataset_path = '../../data/{}/hic_raw/'.format(args.dataset)
if not os.path.exists(dataset_path):
os.makedirs(dataset_path, exist_ok=True)
for i, chr_source in enumerate(chromosomes):
if args.inter:
chromosomes_target = chromosomes[i:]
else:
chromosomes_target = [chr_source]
for chr_target in chromosomes_target:
print('Downloading interactions between chr.', chr_source, 'and chr.', chr_target)
output_hic = 'hic_raw_{}_{}_{}.npz'.format(chr_source, chr_target, args.window)
output_path = os.path.join(dataset_path, output_hic)
if not os.path.exists(output_path):
# import ipdb
# ipdb.set_trace()
output_original = 'hic_raw_{}_{}_{}.txt'.format(chr_source, chr_target, args.resolution)
original_path = os.path.join(dataset_path, output_original)
os.system('java -jar {} '.format(args.juicer_path) +
'dump observed NONE {} '.format(args.input) +
'{} {} '.format(chr_source, chr_target) +
'BP {} '.format(args.resolution) +
'{}'.format(original_path))
hic = pd.read_csv(original_path, delim_whitespace=True, header=None)
contact_matrix = sps.csr_matrix(
(hic.iloc[:, 2], (hic.iloc[:, 0] // args.resolution, hic.iloc[:, 1] // args.resolution)))
if args.window > args.resolution:
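                    # Re-bin the contact matrix from `resolution` to the coarser `window`:
                    # np.add.reduceat sums consecutive groups of window//resolution rows,
                    # then the same groups of columns.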
contact_matrix_agg = np.add.reduceat(contact_matrix.toarray(),
np.arange(0, contact_matrix.shape[0], args.window // args.resolution),
axis=0)
contact_matrix_agg = np.add.reduceat(contact_matrix_agg,
np.arange(0, contact_matrix.shape[1], args.window // args.resolution),
axis=1)
contact_matrix = sps.csr_matrix(contact_matrix_agg)
# import ipdb
# ipdb.set_trace()
sps.save_npz(output_path, contact_matrix)
os.remove(original_path)
else:
print('File already existing. Skip.')
if args.save_plot:
contact_matrix = sps.load_npz(output_path)
if args.save_plot:
plot_path = '../../data/plots/{}/hic_raw'.format(args.dataset)
if not os.path.exists(plot_path):
os.makedirs(plot_path, exist_ok=True)
plt.figure(dpi=200)
plt.imshow(np.log1p(contact_matrix.toarray()*10), cmap='Reds')
plt.savefig(os.path.join(plot_path, 'hic_raw_{}_{}_{}.png'.format(chr_source, chr_target, args.window)))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Link of the Hi-C data hosted on Juicer')
parser.add_argument('--juicer-path', type=str, required=True)
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--resolution', type=int, default=10000,
help='Resolution of the Hi-C data.')
parser.add_argument('--window', type=int, default=40000,
help='Resolution of the Hi-C data.')
parser.add_argument('--chromosomes', nargs='*', default=None,
help='List of chromosomes for which to extract the Hi-C data. If empty all the non-sexual chromosomes data will be extracted.')
parser.add_argument('--inter', default=False, action='store_true',
help='Extract also interchromosomal interactions')
parser.add_argument('--save-plot', default=False, action='store_true')
args = parser.parse_args()
if args.window % args.resolution != 0:
raise ValueError('window must be a multiple of the resolution')
main(args)
```
#### File: src/data_preprocessing/02_hic_norm.py
```python
import os
import argparse
import numpy as np
from iced import normalization
import scipy.sparse as sps
import matplotlib.pyplot as plt
def main(args):
chromosomes = range(1, 23) if args.chromosomes is None else args.chromosomes
dataset_path = '../../data/{}/hic_raw'.format(args.dataset)
file_list = os.listdir(dataset_path)
chr_list = [f.split('.')[-2] for f in file_list]
for i in chromosomes:
print('Chromosome ', i)
input_path = file_list[chr_list.index('chr'+str(i))]
original_path = os.path.join(dataset_path, input_path)
# import ipdb
# ipdb.set_trace()
contact_matrix = np.genfromtxt(original_path, delimiter='\t')
contact_matrix = normalization.ICE_normalization(contact_matrix)
contact_matrix_sparse = sps.csr_matrix(contact_matrix)
sps.save_npz(dataset_path + '/hic_raw_{}_{}_{}.npz'.format(i, i, args.resolution),
contact_matrix_sparse)
os.remove(original_path)
if args.save_plot:
plot_path = '../../data/plots/{}/hic_raw'.format(args.dataset)
if not os.path.exists(plot_path):
os.makedirs(plot_path, exist_ok=True)
plt.figure(dpi=200)
            plt.imshow(np.log1p(contact_matrix_sparse.toarray() * 10), cmap='Reds')
            plt.savefig(os.path.join(plot_path, 'hic_raw_{}_{}_{}.png'.format(i, i, args.resolution)))
print('Hi-C data saved in sparse format in', dataset_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--resolution', type=int, required=True, help='Resolution of the Hi-C data.')
parser.add_argument('--chromosomes', nargs='*', default=None,
help='List of chromosomes for which to normalize the Hi-C data. If empty all the non-sexual chromosomes data will be normalized.')
parser.add_argument('--save-plot', default=False, action='store_true')
args = parser.parse_args()
main(args)
```
#### File: src/link_prediction/01_link_prediction_chromosome.py
```python
import numpy as np
import random
import torch
import argparse
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from utils_link_prediction import *
from utils import set_n_threads, set_gpu
from train_GNN import train_main
import ipdb
def main(args):
if args.chr_src is None:
raise ValueError()
if args.chr_tgt is None:
args.chr_tgt = args.chr_src
if args.chromatin_network_name is None:
args.chromatin_network_name = '{}_{}_{}_{}_{}'.format(args.type, args.chr_src, args.chr_tgt, args.bin_size, args.hic_threshold)
args, filename = setup_filenames_and_folders(args, args.chr_src)
np.random.seed(args.seed)
random.seed(args.seed)
if not os.path.exists('{}/results/{}/{}'.format(args.data_root, args.dataset, filename)) or args.force:
print('Prediction of co-expression links from chr. {} to {} using {} embeddings.'.format(args.chr_src, args.chr_tgt, args.method))
coexpression, disconnected_nodes = load_coexpression(args, args.chromatin_network_name, '{}_{}'.format(args.chr_src, args.chr_tgt))
edges, non_edges = get_edges(coexpression)
X_train, X_test, y_train, y_test = build_dataset(args, edges, non_edges, coexpression.shape[0])
print('Method: {}, classifier: {}, seed: {}'.format(args.method, args.classifier, args.seed))
if args.method[:3] != 'GNN':
if args.chr_src == 1:
print('Training on {} dataset'.format(args.dataset))
print('{} training samples, {} testing samples'.format(len(X_train), len(X_test)))
link_prediction(args, X_train, y_train, X_test, y_test, filename)
else:
emb = train_main(args, X_train, y_train, X_test, y_test, filename)
X = np.vstack((X_train, X_test))
y = np.hstack((y_train, y_test))
name = args.chromatin_network_name
emb_file = '{}_es{}_nl{}_nhds{}_clf_{}'.format(name, args.emb_size,
args.n_layers, args.num_heads, args.classifier)
if args.save_emb:
emb_path = '{}/{}/embeddings/{}_{}'.format(args.data_root, args.dataset, args.classifier, args.method)
os.makedirs(emb_path, exist_ok=True)
np.save('{}/{}.npy'.format(emb_path, emb_file), emb.cpu().numpy())
else:
print('Result already computed for {}. Skipped.'.format(filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-root', type=str, required=True, default='../../data')
parser.add_argument('--node-feature', type=str, default='random',
choices=['random', 'one-hot', 'biological', 'pre-trained'])
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--nfeat-path', type=str, default=None,
                        help='required when node feature is biological or pre-trained')
parser.add_argument('--dataset', type=str, required=True)
parser.add_argument('--chr-src', type=int, default=1)
parser.add_argument('--chr-tgt', type=int, default=1)
parser.add_argument('--n-iter', type=int, default=1)
parser.add_argument('--cv-splits', type=int, default=5)
parser.add_argument('--method', type=str, default='node2vec',
choices=['random', 'distance', 'topological', 'svd', 'node2vec',
'GNN_GCN', 'GNN_HiCoEx', 'GNN_GCN_pyg', 'GNN_HiCoEx_pyg'])
parser.add_argument('--type', type=str)
parser.add_argument('--bin-size', type=int)
parser.add_argument('--hic-threshold', type=str)
parser.add_argument('--chromatin-network-name', type=str)
parser.add_argument('--aggregators', nargs='*', default=['hadamard'], choices=['hadamard', 'avg', 'l1'])
parser.add_argument('--classifier', default='rf', choices=['mlp', 'direct', 'rf', 'random'])
parser.add_argument('--coexp-thr', type=str, default=None, required=True)
parser.add_argument('--save-predictions', default=True, action='store_true')
parser.add_argument('--emb-size', type=int, default=16)
# Topological measures params
parser.add_argument('--edge-features', default=True, action='store_true')
# Node2vec params
parser.add_argument('--num-walks', type=int, default=10)
parser.add_argument('--walk-len', type=int, default=80)
parser.add_argument('--p', type=float, default=1.0)
parser.add_argument('--q', type=float, default=1.0)
parser.add_argument('--window', type=int, default=10)
# GNN_* params
parser.add_argument('--training', default=False, action='store_true')
parser.add_argument('--times', type=int, default=None)
parser.add_argument('--init_lr', type=float, default=0.01)
parser.add_argument('--weight-decay', type=float, default=0.001)
parser.add_argument('--lr-reduce-factor', type=float, default=0.5)
parser.add_argument('--epoches', type=int, default=100)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--n-layers', type=int, default=2)
parser.add_argument('--num-heads', type=int, default=1)
parser.add_argument('--out-dim', type=int, default=16)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--load-ckpt', default=False, action='store_true')
parser.add_argument('--save-emb', default=False, action='store_true')
parser.add_argument('--force', default=False, action='store_true')
parser.add_argument('--test', default=True, action='store_true')
parser.add_argument('--gpu', default=False, action='store_true')
parser.add_argument('--gpu-id', type=int, default=0)
parser.add_argument('--device', type=str, default=None)
parser.add_argument('--n-jobs', type=int, default=10)
parser.add_argument('--wandb', default=False, action='store_false')
parser.add_argument('--project', default='parameter-importance', type=str)
args = parser.parse_args()
# ipdb.set_trace()
device = set_gpu(args.gpu, args.gpu_id)
args.device = device
set_n_threads(args.n_jobs)
main(args)
``` |
{
"source": "JieZheng-ShanghaiTech/MGE4SL",
"score": 2
} |
#### File: JieZheng-ShanghaiTech/MGE4SL/data_prepare.py
```python
import math
import torch
from torch_geometric.data import Data
from torch_geometric.utils import to_undirected
class SynlethDB(Data):
def __init__(self, num_nodes, sl_data,nosl_data):
num_nodes = num_nodes
num_edges = sl_data.shape[0]
neg_num_edges = nosl_data.shape[0]
feat_node_dim = 1
feat_edge_dim = 1
self.x = torch.ones(num_nodes, feat_node_dim)
self.y = torch.randint(0, 2, (num_nodes,))
self.edge_index = torch.tensor(sl_data[['gene_a_encoder','gene_b_encoder']].T.values, dtype=torch.long)
self.edge_attr = torch.ones(num_edges, feat_edge_dim)
self.neg_edge_index = torch.tensor(nosl_data[['gene_a_encoder', 'gene_b_encoder']].T.values, dtype=torch.long)
self.neg_edge_attr = torch.ones(neg_num_edges, feat_edge_dim)
#related knowledge graph
class SynlethDB_KG(Data):
def __init__(self, kg_data, types):
self.type = types
num_nodes = 9872
num_edges = kg_data.shape[0]
feat_node_dim = 1
feat_edge_dim = 1
self.x = torch.ones(num_nodes, feat_node_dim)
self.y = torch.randint(0, 2, (num_nodes,))
self.edge_index = torch.tensor(kg_data[['gene_a_encoder','gene_b_encoder']].T.values, dtype=torch.long)
self.edge_attr = torch.tensor(kg_data[[self.type]].values, dtype = torch.long)
#random negative sample
def get_k_fold_data_random_neg(data, k = 10):
num_nodes = data.num_nodes
row, col = data.edge_index
num_edges = row.size(0)
mask = row < col
row, col = row[mask], col[mask]
neg_row, neg_col = data.neg_edge_index
neg_num_edges = neg_row.size(0)
mask = neg_row < neg_col
neg_row, neg_col = neg_row[mask], neg_col[mask]
assert k > 1
fold_size = num_edges // k
perm = torch.randperm(num_edges)
row, col = row[perm], col[perm]
neg_perm = torch.randperm(neg_num_edges)
neg_row, neg_col = neg_row[neg_perm], neg_col[neg_perm]
res_neg_adj_mask = torch.ones(num_nodes, num_nodes, dtype=torch.uint8)
res_neg_adj_mask = res_neg_adj_mask.triu(diagonal=1).to(torch.bool)
res_neg_adj_mask[row, col] = 0
res_neg_row, res_neg_col = res_neg_adj_mask.nonzero(as_tuple=False).t()
for j in range(k):
val_start = j * fold_size
val_end = (j+1) * fold_size
if j == k - 1:
val_row, val_col = row[val_start:], col[val_start:]
train_row, train_col = row[:val_start], col[:val_start]
else:
val_row, val_col = row[val_start:val_end], col[val_start:val_end]
train_row, train_col = torch.cat([row[:val_start],row[val_end:]], 0), torch.cat([col[:val_start],col[val_end:]], 0)
# val
data.val_pos_edge_index = torch.stack([val_row, val_col], dim=0)
# train
data.train_pos_edge_index = torch.stack([train_row, train_col], dim=0)
add_val = data.val_pos_edge_index.shape[1]
add_train = data.train_pos_edge_index.shape[1]
perm = torch.randperm(res_neg_row.size(0))[:add_val+add_train]
res_neg_row, res_neg_col = res_neg_row[perm], res_neg_col[perm]
res_r, res_c = res_neg_row[:add_val], res_neg_col[:add_val]
data.val_neg_edge_index = torch.stack([res_r, res_c], dim=0)
res_r, res_c = res_neg_row[add_val:add_val+add_train], res_neg_col[add_val:add_val+add_train]
data.train_neg_edge_index = torch.stack([res_r, res_c], dim=0)
data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
data.train_neg_edge_index = to_undirected(data.train_neg_edge_index)
yield data
def train_test_split_edges_kg(data, test_ratio=0.1):
num_nodes = data.num_nodes
row, col = data.edge_index
data.edge_index = None
num_edges = row.size(0)
# Return upper triangular portion.
mask = row < col
row, col = row[mask], col[mask]
n_t = int(math.floor(test_ratio * num_edges))
# Positive edges.
perm = torch.randperm(row.size(0))
row, col = row[perm], col[perm]
r, c = row[:n_t], col[:n_t]
data.test_pos_edge_index = torch.stack([r, c], dim=0)
r, c = row[n_t:], col[n_t:]
data.train_pos_edge_index = torch.stack([r, c], dim=0)
data.train_pos_edge_index = to_undirected(data.train_pos_edge_index)
# Negative edges.
neg_adj_mask = torch.ones(num_nodes, num_nodes, dtype=torch.uint8)
neg_adj_mask = neg_adj_mask.triu(diagonal=1).to(torch.bool)
neg_adj_mask[row, col] = 0
neg_row, neg_col = neg_adj_mask.nonzero(as_tuple=False).t()
perm = torch.randperm(neg_row.size(0))[:num_edges]
neg_row, neg_col = neg_row[perm], neg_col[perm]
row, col = neg_row[:n_t], neg_col[:n_t]
data.test_neg_edge_index = torch.stack([row, col], dim=0)
row, col = neg_row[n_t:], neg_col[n_t:]
data.train_neg_edge_index = torch.stack([row, col], dim=0)
data.train_neg_edge_index = to_undirected(data.train_neg_edge_index)
return data
def construct_kg_sldb(data):
combined_score_data = data[data['combined_score'] > 0]
reactome_data = data[data['reactome'] > 0]
corum_data = data[data['corum'] > 0]
go_F_data = data[data['go_F'] > 0]
go_C_data = data[data['go_C'] > 0]
go_P_data = data[data['go_P'] > 0]
kegg_data = data[data['kegg'] > 0]
synlethdb_ppi = SynlethDB_KG(combined_score_data, 'combined_score')
synlethdb_ppi = train_test_split_edges_kg(synlethdb_ppi, test_ratio=0)
synlethdb_rea = SynlethDB_KG(reactome_data, 'reactome')
synlethdb_rea = train_test_split_edges_kg(synlethdb_rea, test_ratio=0)
synlethdb_cor = SynlethDB_KG(corum_data, 'corum')
synlethdb_cor = train_test_split_edges_kg(synlethdb_cor, test_ratio=0)
synlethdb_go_F = SynlethDB_KG(go_F_data, 'go_F')
synlethdb_go_F = train_test_split_edges_kg(synlethdb_go_F, test_ratio=0)
synlethdb_go_C = SynlethDB_KG(go_C_data, 'go_C')
synlethdb_go_C = train_test_split_edges_kg(synlethdb_go_C, test_ratio=0)
synlethdb_go_P = SynlethDB_KG(go_P_data, 'go_P')
synlethdb_go_P = train_test_split_edges_kg(synlethdb_go_P, test_ratio=0)
synlethdb_kegg = SynlethDB_KG(kegg_data, 'kegg')
synlethdb_kegg = train_test_split_edges_kg(synlethdb_kegg, test_ratio=0)
return synlethdb_ppi,synlethdb_rea,synlethdb_cor,synlethdb_go_F,synlethdb_go_C,synlethdb_go_P,synlethdb_kegg
``` |
{
"source": "JieZheng-ShanghaiTech/PIKE-R2P",
"score": 2
} |
#### File: JieZheng-ShanghaiTech/PIKE-R2P/gcn_test.py
```python
import torch
import numpy as np
from torchvision.datasets import mnist
from torch import nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset,DataLoader
from tqdm import tqdm
from scipy.stats import pearsonr
from model import Net,Net2
from sklearn.model_selection import train_test_split
from config import Config
def load_data(configs,k=True):
print('loading data....')
x_test = np.load(configs.test_x_file)
y_test = np.load(configs.test_y_file)
configs.input_dim=x_test.shape[1]
configs.output_dim=y_test.shape[1]
try:
print('using knowledge from',configs.corelation_matrix)
coef_m = np.load(configs.corelation_matrix)
coef_list=[]
for c in configs.corelation_list:
coef_list.append(np.load(c))
coef_list=np.array(coef_list)
print(coef_list.shape)
    except Exception:
        print('random init')
        coef_m = np.random.random((y_test.shape[1], y_test.shape[1]))
        # fallback so the k=True return path still has a coef_list to hand back
        coef_list = np.array([coef_m])
print(x_test.shape,y_test.shape)
print(x_test.shape,y_test.shape)
x_test = torch.from_numpy(x_test)
y_test = torch.from_numpy(y_test)
datas_test = TensorDataset(x_test, y_test)
data_loader_test = DataLoader(datas_test, batch_size=configs.batch_size, shuffle=False)
if k:
return data_loader_test, coef_m, configs,coef_list
else:
return data_loader_test,coef_m,configs
def test(test_loader,coef_m,coef_list,configs):
label_shape=configs.output_dim
if configs.net_model=='k':
net = Net2(configs.input_dim, configs.output_dim,configs.device,coef_list)
else:
net = Net(configs.input_dim, configs.output_dim, configs.device, coef_m)
net.load_state_dict(torch.load(configs.val_model))
net.to(configs.device)
print(net)
criterion = nn.MSELoss()
net.eval()
output=[[] for _ in range(label_shape)]
targets=[[] for _ in range(label_shape)]
val_loss=0
for data,target in test_loader:
data, target = Variable(data).float(), Variable(target).float()
# data.cuda()
data = data.to(configs.device)
target = target.to(configs.device)
out = net(data)
loss = criterion(out, target)
val_loss+=loss.item()
for i in range(label_shape):
output[i].append(out[:,i])
targets[i].append(target[:,i])
val_loss/=len(test_loader)
pearson_list=[]
for i in range(label_shape):
output[i]=torch.cat([x for x in output[i]]).cpu().detach().numpy().tolist()
targets[i] = torch.cat([x for x in targets[i]]).cpu().detach().numpy().tolist()
p=pearsonr(output[i],targets[i])[0]
if p>=-1 and p<=1:
pearson_list.append(p)
else:
pearson_list.append(0)
print('0 pers at',i,max(output[i]),min(output[i]))
pear_sum=np.mean(np.array(pearson_list))
print('test_loss', val_loss, '\tpearsonr', pear_sum)
with open(configs.logs,'a') as f:
f.write('test\t')
f.write(configs.net_model+'\t')
f.write(str(configs.test)+'\t')
f.write(configs.name+'\t')
f.write(configs.result_save+'\n')
f.write(str(val_loss)+'\t'+str(pear_sum)+'\n')
print(configs.net_model,configs.test,configs.name,configs.result_save)
```
#### File: JieZheng-ShanghaiTech/PIKE-R2P/model.py
```python
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
class Net(nn.Module):
def __init__(self,input_dim,output_dim,device,coef_m=None):
super(Net, self).__init__()
self.output_dim=output_dim
self.fc_input=nn.Linear(input_dim,1024)
self.fc1=nn.Linear(1024,128)
self.dropout=nn.Dropout(0.3)
self.output_list=[]
self.device=device
self.output_list2=[]
for i in range(output_dim):
self.output_list.append(nn.Linear(128,64))
self.output_list2.append(nn.Linear(64,1))
self.output_list=nn.ModuleList(self.output_list)
self.output_list2=nn.ModuleList(self.output_list2)
# self.fc2=nn.Linear(output_dim,output_dim)
self.trans_gcn=nn.Linear(64,64)
self.nodes_trans_nn = nn.Linear(output_dim, output_dim, bias=False)
self.nodes_trans=Variable(torch.from_numpy(coef_m).contiguous().float(),requires_grad=True).to(device)
coef_w=torch.from_numpy(coef_m*0.25).contiguous().float()
self.nodes_trans_nn.weight=torch.nn.Parameter(coef_w)
self.output_mapping=nn.Linear(64,1)
self.prelu=nn.PReLU()
def forward(self,x):
x.to(self.device)
x=F.relu(self.fc_input(x))
x=F.relu(self.fc1(x))
hidden_list=[]
output_list=[]
for i in range(self.output_dim):
hidden_list.append(F.relu(self.output_list[i](x)))
hidden_list[i] = hidden_list[i].unsqueeze(1)
hiddens = torch.cat([h for h in hidden_list], 1)
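        # treat the per-output hidden vectors as graph nodes and mix them through the node-to-node
        # linear layer, whose weights were initialized from the correlation matrix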
nodes_hidden = hiddens.permute(0, 2, 1)
nodes_trans = self.nodes_trans_nn(nodes_hidden)
nodes_trans = nodes_trans.permute(0, 2, 1)
# print(nodes_trans.shape)
        trans = torch.sigmoid(self.trans_gcn(nodes_trans))
for i in range(self.output_dim):
output_list.append(F.elu(self.output_list2[i](trans[:,i:i+1,:].squeeze(1))))
outputs=self.prelu(self.output_mapping(trans)).squeeze(-1)
return outputs
class Net2(nn.Module):
def __init__(self,input_dim,output_dim,device,coef_m=None):
super(Net2, self).__init__()
self.output_dim = output_dim
self.device = device
self.fc_input = nn.Linear(input_dim, 1024)
self.fc1 = nn.Linear(1024, 128)
self.dropout = nn.Dropout(0.3)
self.output_list = []
self.device = device
self.output_list2 = []
self.coef_m = Variable(torch.from_numpy(coef_m).contiguous().float(), requires_grad=True).to(self.device)
self.coef_m = self.coef_m.permute(1, 2, 0)
self.output_list = nn.ModuleList(self.output_list)
self.output_list2 = nn.ModuleList(self.output_list2)
# self.fc2=nn.Linear(output_dim,output_dim)
self.feature_nums = self.coef_m.shape[2]
self.coef_nn_list = []
self.coef_nn_list_att = []
self.feature_hidden = 32
for i in range(self.feature_nums):
self.coef_nn_list.append(nn.Linear(1, self.feature_hidden))
self.coef_nn_list_att.append(nn.Linear(self.feature_hidden, self.feature_hidden, bias=False))
self.coef_nn_list = nn.ModuleList(self.coef_nn_list)
self.coef_nn_list_att = nn.ModuleList(self.coef_nn_list_att)
self.output_list_att = []
for i in range(self.output_dim):
self.output_list_att.append(nn.Linear(self.feature_hidden, self.feature_hidden, bias=False))
self.output_list_att = nn.ModuleList(self.output_list_att)
self.trans_gcn = nn.Linear(64 + self.feature_hidden * self.output_dim,
64 + self.feature_hidden * self.output_dim,bias=False)
self.nodes_trans_nn = nn.Linear(output_dim, output_dim,bias=False)
self.output_mapping = nn.Linear(64 + self.feature_hidden * self.output_dim, 1)
self.prelu = nn.PReLU()
# self.coef_feature=nn.Linear(self.feature_nums*self.feature_hidden,self.feature_nums*self.feature_hidden)
# self.coef_feature=nn.Linear(self.feature_nums*self.feature_hidden*self.output_dim,self.feature_nums*self.feature_hidden)
self.nodes_feature = nn.Linear(64 + self.feature_hidden * self.output_dim, 64)
for i in range(output_dim):
self.output_list.append(nn.Linear(128, 64))
self.output_list2.append(nn.Linear(64 + self.feature_hidden * self.output_dim, 1))
def forward(self, x):
batch_size = x.shape[0]
x.to(self.device)
x = F.relu(self.fc_input(x))
x = F.relu(self.fc1(x))
hidden_list = []
output_list = []
for i in range(self.output_dim):
hidden_list.append(F.relu(self.output_list[i](x)))
hidden_list[i] = hidden_list[i].unsqueeze(1)
coef_feature_list = []
for i in range(self.feature_nums):
coef_feature_list.append(F.elu(self.coef_nn_list[i](self.coef_m[:, :, i].unsqueeze(-1))))
coef_feature = torch.cat([h for h in coef_feature_list], 2)
sum = torch.sum(torch.exp(F.elu(coef_feature)), 2)
sum = sum.unsqueeze(2)
hiddens = torch.cat([h for h in hidden_list], 1)
coef_feature_list_att = []
coef_feature_s = torch.zeros((self.output_dim, self.output_dim, self.feature_hidden)).contiguous().float().to(
self.device)
for idx, f in enumerate(coef_feature_list):
att = torch.exp(F.elu(f))
p = F.elu(att * self.coef_nn_list_att[idx](f) / sum)
coef_feature_list_att.append(p)
coef_feature_s = coef_feature_s + p
coef_feature_s /= self.feature_nums
coef_feature_s = F.elu(coef_feature_s)
# coef_feature = torch.cat([h for h in coef_feature_list_att], 2)
nodes_feature_s = torch.zeros((self.output_dim, self.feature_hidden)).contiguous().float().to(self.device)
coef_sum = torch.sum(torch.exp(F.elu(coef_feature_s)), 1)
coef_feature_att = []
for i in range(self.output_dim):
nodes_feat = coef_feature_s[:, i, :]
p = torch.exp(F.elu(nodes_feat)) / coef_sum
att = p * self.output_list_att[i](nodes_feat)
coef_feature_att.append(att)
coef_feature_att = torch.cat([h for h in coef_feature_att], 1)
coef_feature_att = coef_feature_att.unsqueeze(0)
coef_feature_att = coef_feature_att.repeat(batch_size, 1, 1)
hiddens = torch.cat([hiddens, coef_feature_att], 2)
# graph neural network
nodes_hidden = hiddens.permute(0, 2, 1)
nodes_trans = self.nodes_trans_nn(nodes_hidden)
nodes_trans = nodes_trans.permute(0, 2, 1)
        trans = torch.sigmoid(self.trans_gcn(nodes_trans))
for i in range(self.output_dim):
output_list.append(self.prelu(self.output_list2[i](trans[:, i:i + 1, :].squeeze(1))))
outputs = self.prelu(self.output_mapping(trans)).squeeze(-1)
return outputs
``` |
{
"source": "Jiezhi/forecastr",
"score": 3
} |
#### File: Jiezhi/forecastr/helper_v4.py
```python
import time
import numpy as np
import pandas as pd
from flask_socketio import emit
from prophet import Prophet
from prophet.diagnostics import cross_validation
from prophet.diagnostics import performance_metrics
def forecastr(data, forecast_settings, column_headers, freq_val, build_settings):
"""
Background: This function will take the data from the csv and forecast out x number of days.
Input:
data: This is a pandas dataframe containing time series data (2 columns: date and metric)
forecast_settings: This is a list containing values for model type, forecast period length and seasonality parameters
column_headers: List containing the name of the date and metric
freq_val: String containing "D","M","Y"
build_settings: String determining whether this is an initial or updated forecast.
Output:
[y_hat,dates,m,csv_ready_for_export]: A list containing forecasted data, dimension, model and data for the csv export
"""
##### Variables, Model Settings & Facebook Prophet Hyper Parameters #####
# Initial Variables
build = build_settings # Determine the build_setting - either initial or update forecast settings.
dimension = column_headers[0] # date
metric = column_headers[1] # metric name
# Rename the columns so we can use FB Prophet
data.rename(index=str, columns={dimension: "ds", metric: "y"}, inplace=True)
# Hyper-parameters
fs_model_type = forecast_settings[0] # linear or logistic
fs_period = int(forecast_settings[1]) # int
fs_seasonality_mode = forecast_settings[4] # additive or multiplicative
fs_daily_seasonality = forecast_settings[6][0] # True or False
fs_weekly_seasonality = forecast_settings[6][1] # True or False
fs_yearly_seasonality = forecast_settings[6][2] # True or False
# Need to set carrying capacity and saturated min as an int if model_type = 'logistic', else we'll set as 'auto' to be filtered out.
if fs_model_type == 'logistic':
fs_carrying_capacity = int(forecast_settings[2]) # int
fs_saturated_minimum = int(forecast_settings[3]) # int
data['cap'] = fs_carrying_capacity
data['floor'] = fs_saturated_minimum
else:
print('no cap or floor needed as it is a linear model.')
        fs_carrying_capacity = 'auto'
fs_saturated_minimum = 'auto'
# Additional Hyper Parameters
fs_seasonality_prior_scale = forecast_settings[5] # int
fs_n_changepoints = forecast_settings[7] # int
fs_changepoints_prior_scale = forecast_settings[8] # int??
# Check the following hyper parameters to see if they were set from within the UI. If not, they'll be set to 'auto'
fs_seasonality_prior_scale = check_val_of_forecast_settings(fs_seasonality_prior_scale)
fs_n_changepoints = check_val_of_forecast_settings(fs_n_changepoints)
fs_changepoints_prior_scale = check_val_of_forecast_settings(fs_changepoints_prior_scale)
# Holidays - to be included in a future iteration....
holidays_prior_scale = 10 # Determines how much of an effect holidays should have on a prediction. Default value is 10
#### End of Hyper Parameters Settings ####
    # Now let's set up the arguments so that we can pass them into Prophet() when we instantiate the model.
arguments = ['growth',
'seasonality_mode',
'seasonality_prior_scale',
'daily_seasonality',
'weekly_seasonality',
'yearly_seasonality',
'n_changepoints',
'changepoint_prior_scale']
arg_values = [fs_model_type,
fs_seasonality_mode,
fs_seasonality_prior_scale,
fs_daily_seasonality,
fs_weekly_seasonality,
fs_yearly_seasonality,
fs_n_changepoints if fs_n_changepoints == 'auto' else int(fs_n_changepoints),
fs_changepoints_prior_scale]
# Needs to be a dictionary
model_arg_vals = dict(zip(arguments, arg_values))
###### CHECK TO SEE WHAT VALUES WERE SET FROM WITHIN THE UI ######
# Check to see if any values are 0, auto or false. If any hyper-parameters have these values, they will not be included
# when the pass in the dictionary prophet_arg_vals as kwarg
prophet_arg_vals = {}
for key, value in model_arg_vals.items():
if value != "" and value != False and value != 0 and value != 'auto':
prophet_arg_vals[key] = value
else:
print(f'skipping {key}: {value}')
##### TIME TO INSTANTIATE, FIT AND PREDICT WITH FACEBOOK PROPHET ######
# Instantiate with prophet_arg_vals that are not auto, 0 or False.
m = Prophet(**prophet_arg_vals)
    # Fit the model - side note: it would be interesting to time how long this takes by file size
start = time.time()
m.fit(data)
end = time.time()
print(end - start)
# Status update
emit('processing', {'data': 'model has been fit'})
# Let's create a new data frame for the forecast which includes how long the user requested to forecast out in time units and by time unit type (eg. "D", "M","Y")
future = m.make_future_dataframe(periods=fs_period, freq=freq_val)
# If fs_model_type = 'logistic', create a column in future for carrying_capacity and saturated_minimum
if fs_model_type == 'logistic':
future['cap'] = fs_carrying_capacity
future['floor'] = fs_saturated_minimum
else:
print('no cap or floor needed as it is a linear model.')
# Let's predict the future :)
forecast = m.predict(future)
##### Removed Cross-Validation for this release - see v3 for previous implementation #####
##### Send y_hat and dates to a list, so that they can be graphed easily when set in ChartJS
y_hat = forecast['yhat'].tolist()
yhat_lower = forecast['yhat_lower'].tolist()
yhat_upper = forecast['yhat_upper'].tolist()
dates = forecast['ds'].apply(lambda x: str(x).split(' ')[0]).tolist()
##### Lets see how the forecast compares to historical performance #####
# First, lets sum up the forecasted metric
forecast_sum = forecast['yhat'][-fs_period:].sum()
forecast_mean = forecast['yhat'][-fs_period:].mean()
# Now lets sum up the actuals for the same time interval as we predicted
actual_sum = float(data['y'][-fs_period:].sum())
actual_mean = float(data['y'][-fs_period:].mean())
difference = '{0:.1%}'.format(((forecast_sum - actual_sum) / forecast_sum))
difference_mean = '{0:.1%}'.format(((forecast_mean - actual_mean) / forecast_mean))
forecasted_vals = ['{0:.1f}'.format(forecast_sum), '{0:.1f}'.format(actual_sum), difference]
forecasted_vals_mean = ['{0:.1f}'.format(forecast_mean), '{0:.1f}'.format(actual_mean), difference_mean]
'''
# Lets compare those two numbers, if forecast_sum is greater than actual, calculate the increase. Else, calculate the decrease
if forecast_sum - actual_sum > 0: # this if else handles percent increase vs. decrease
difference = '{0:.2%}'.format(((forecast_sum - actual_sum) / forecast_sum))
print("*********** DIFFERENCE IS ********")
print(difference)
else:
difference = '{0:.2f}'.format(((actual_sum - forecast_sum) / actual_sum))
print("*********** DIFFERENCE IS ********")
print(difference)
'''
####### Formatting data for CSV Export Functionality ##########
# First, let's merge the original and forecast dataframes
data_for_csv_export = pd.merge(forecast, data, on='ds', how='left')
# Select the columns we want to include in the export
export_formatted = data_for_csv_export[['ds', 'y', 'yhat', 'yhat_upper', 'yhat_lower']]
# Rename y and yhat to the actual metric names
export_formatted.rename(index=str, columns={'ds': 'date', 'y': metric, 'yhat': metric + '_forecast',
'yhat_upper': metric + '_upper_forecast',
'yhat_lower': metric + '_lower_forecast'}, inplace=True)
# replace NaN with an empty val
export_formatted = export_formatted.replace(np.nan, '', regex=True)
# Format timestamp
export_formatted['date'] = export_formatted['date'].apply(lambda x: str(x).split(' ')[0])
# Create dictionary format for sending to csv
csv_ready_for_export = export_formatted.to_dict('records')
# print(y_hat)
# print(csv_ready_for_export)
print(forecasted_vals)
print(forecasted_vals_mean)
return [y_hat, dates, m, csv_ready_for_export, forecasted_vals, forecasted_vals_mean, yhat_lower, yhat_upper]
def validate_model(model, dates):
"""
Background:
This model validation function is still under construction and will be updated during a future release.
"""
count_of_time_units = len(dates)
# print(count_of_time_units)
initial_size = str(int(count_of_time_units * 0.20)) + " days"
horizon_size = str(int(count_of_time_units * 0.10)) + " days"
period_size = str(int(count_of_time_units * 0.05)) + " days"
df_cv = cross_validation(model, initial=initial_size, horizon=horizon_size, period=period_size)
# df_cv = cross_validation(model,initial='730 days', period='180 days', horizon = '365 days')
df_p = performance_metrics(df_cv)
# print(df_cv.head(100))
# print(df_p.head(100))
mape_score_avg = str(round(df_p['mape'].mean() * 100, 2)) + "%"
return mape_score_avg
def check_val_of_forecast_settings(param):
"""
Background:
    This function checks whether a value (submitted from the user in the UI) was set for a given Prophet hyper parameter. If the value is blank, False or 'auto', it is returned as-is; otherwise it is cast to a float, since the submitted value may be a string.
    If the param value is blank, False or 'auto', it will eventually be excluded from the dictionary passed in when instantiating Prophet.
"""
# Check hyper parameter value and return appropriate value.
if (param == "") or (param == False) or (param == 'auto'):
new_arg = param
return new_arg
else:
new_arg = float(param)
return new_arg
def get_summary_stats(data, column_headers):
"""
Background:
This function will get some summary statistics about the original dataset being uploaded.
Input:
data: a dataframe with the data from the uploaded csv containing a dimension and metric
column_headers: string of column names for the dimension and metric
Output:
sum_stats: a list containing the count of time units, the mean, std, min and max values of the metric. This data is rendered on step 2 of the UI.
"""
# Set the dimension and metrics
dimension = column_headers[0]
metric = column_headers[1]
time_unit_count = str(data[dimension].count())
print(data[metric].mean())
mean = str(round(data[metric].mean(), 2))
print('string of the mean is ' + mean)
std = str(round(data[metric].std(), 2))
minimum = str(round(data[metric].min(), 2))
maximum = str(round(data[metric].max(), 2))
sum_stats = [time_unit_count, mean, std, minimum, maximum]
print(sum_stats)
return sum_stats
def preprocessing(data):
"""
Background: This function will determine which columns are dimensions (time_unit) vs metrics, in addition to reviewing the metric data to see if there are any objects in that column.
Input:
data (df): A dataframe of the parsed data that was uploaded.
Output:
[time_unit,metric_unit]: the appropriate column header names for the dataset.
"""
# Get list of column headers
column_headers = list(data)
# Let's determine the column with a date
col1 = column_headers[0]
col2 = column_headers[1]
print('the first column is ' + col1)
# Get the first value in column 1, which is what is going to be checked.
col1_val = data[col1][0]
print(type(col1_val))
"""
    TO DO: Pre-processing around the dtypes of both columns. If both are objects, I'll need to determine which one is the date column.
TO DO: Emit any error messaging
print('The data type of this metric column is: ' + str(data[metric].dtype))
print(data[metric].head())
data[metric] = data[metric].apply(lambda x: float(x))
print(data[metric].dtype)
"""
# Check to see if the data has any null values
print('Is there any null values in this data? ' + str(data.isnull().values.any()))
# If there is a null value in the dataset, locate it and emit the location of the null value back to the client, else continue:
print(data.tail())
do_nulls_exist = data.isnull().values.any()
if do_nulls_exist == True:
print('found a null value')
        null_rows = np.where(pd.isnull(data).any(axis=1))[0]
print('######### ORIGINAL ROWS THAT NEED UPDATING ##############')
print(null_rows)
        # Need to add 2 to each value in null_rows because the CSV has a header row and its rows are 1-indexed
print('######### ROWS + 2 = ACTUAL ROW NUMBERS IN CSV ##############')
update_these_rows = []
for x in null_rows:
update_these_rows.append(int(x) + 2)
print(update_these_rows)
emit('error', {'data': update_these_rows})
else:
print('no nulls found')
if isinstance(col1_val, (int, np.integer)) or isinstance(col1_val, float):
print(str(col1_val) + ' this is a metric')
print('Setting time_unit as the second column')
time_unit = column_headers[1]
metric_unit = column_headers[0]
return [time_unit, metric_unit]
else:
print('Setting time_unit as the first column')
time_unit = column_headers[0]
metric_unit = column_headers[1]
return [time_unit, metric_unit]
def determine_timeframe(data, time_unit):
"""
Background:
This function determines whether the data is daily, weekly, monthly or yearly by checking the delta between the first and second date in the df.
Input:
        data: a df containing a dimension and a metric
time_unit: is the dimension name for the date.
Output:
time_list: a list of strings to be used within the UI (time, desc) and when using the function future = m.make_future_dataframe(periods=fs_period, freq=freq_val)
"""
# Determine whether the data is daily, weekly, monthly or yearly
date1 = data[time_unit][0]
date2 = data[time_unit][1]
first_date = pd.Timestamp(data[time_unit][0])
second_date = pd.Timestamp(data[time_unit][1])
time_delta = second_date - first_date
time_delta = int(str(time_delta).split(' ')[0])
print([data[time_unit][0], data[time_unit][1]])
print([second_date, first_date, time_delta])
if time_delta == 1:
time = 'days'
freq = 'D'
desc = 'daily'
elif 7 <= time_delta <= 27:
time = 'weeks'
freq = 'W'
desc = 'weekly'
elif 28 <= time_delta <= 31:
time = 'months'
freq = 'M'
desc = 'monthly'
elif time_delta >= 364:
time = 'years'
freq = 'Y'
desc = 'yearly'
    else:
        # unrecognized spacing between the first two dates; fall back to daily so time_list is always defined
        print('could not determine timeframe from a delta of ' + str(time_delta) + ' days; defaulting to daily')
        time = 'days'
        freq = 'D'
        desc = 'daily'
time_list = [time, freq, desc]
# print(time_list)
return time_list
``` |
{
"source": "Jiezhi/myleetcode",
"score": 3
} |
#### File: myleetcode/src/1009-ComplementofBase10Integer.py
```python
class Solution:
def bitwiseComplement(self, n: int) -> int:
"""
Runtime: 43 ms, faster than 9.05%
Memory Usage: 14.3 MB, less than 38.43%
0 <= n < 10^9
:param n:
:return:
"""
ret = []
n_str = format(n, 'b')
for c in n_str:
if c == '1':
ret.append('0')
else:
ret.append('1')
return int(''.join(ret), 2)
def test():
assert Solution().bitwiseComplement(n=5) == 2
assert Solution().bitwiseComplement(n=7) == 0
assert Solution().bitwiseComplement(n=10) == 5
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1010-PairsofSongsWithTotalDurationsDivisibleby60.py
```python
import collections
from typing import List
class Solution:
def numPairsDivisibleBy60(self, time: List[int]) -> int:
"""
Ref: https://leetcode.com/problems/pairs-of-songs-with-total-durations-divisible-by-60/discuss/256738/JavaC%2B%2BPython-Two-Sum-with-K-60
Runtime: 208 ms, faster than 97.68%
Memory Usage: 18 MB, less than 48.64%
1 <= time.length <= 6 * 10^4
1 <= time[i] <= 500
:param time:
:return:
"""
remainders = [0] * 60
ret = 0
for t in time:
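            # a song seen earlier pairs with t iff its remainder is (60 - t % 60) % 60, which is exactly -t % 60 in Python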
ret += remainders[-t % 60]
remainders[t % 60] += 1
return ret
def test():
assert Solution().numPairsDivisibleBy60(time=[30, 20, 150, 100, 40]) == 3
assert Solution().numPairsDivisibleBy60(time=[60]) == 0
assert Solution().numPairsDivisibleBy60(time=[60, 60]) == 1
assert Solution().numPairsDivisibleBy60(time=[60, 60, 60]) == 3
assert Solution().numPairsDivisibleBy60(time=[60, 60, 60, 60]) == 6
assert Solution().numPairsDivisibleBy60(time=[60, 60, 60, 60, 60]) == 10
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1022-SumOfRootToLeafBinaryNumbers.py
```python
import collections
from typing import Optional
from src.tree_node import TreeNode, build_tree_node
class Solution:
def sumRootToLeaf(self, root: Optional[TreeNode]) -> int:
"""
Runtime: 52 ms, faster than 24.89%
Memory Usage: 14.7 MB, less than 39.83%
The number of nodes in the tree is in the range [1, 1000].
Node.val is 0 or 1.
:param root:
:return:
"""
ret = 0
def dfs(node, num):
if not node:
return
nonlocal ret
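            # shift the accumulated path value left and append the current node's bit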
num = (num << 1) | node.val
if not node.left and not node.right:
ret += num
return
if node.left:
dfs(node.left, num)
if node.right:
dfs(node.right, num)
dfs(root, 0)
return ret
def test():
assert Solution().sumRootToLeaf(root=build_tree_node([1, 1, 1, 1, 1, 1, 1])) == 28
assert Solution().sumRootToLeaf(root=build_tree_node([1, 0, 1, 0, 1, 0, 1])) == 22
assert Solution().sumRootToLeaf(root=build_tree_node([0])) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/102-BinaryTreeLevelOrderTraversal.py
```python
import queue
from tree_node import *
class Solution:
def level_order(self, root: TreeNode) -> list:
if not root:
return []
ret = []
q = queue.Queue()
q.put([root])
while q.qsize() > 0:
tmp_ret = []
tmp_list = []
for tmp in q.get():
tmp_ret.append(tmp.val)
if tmp.left:
tmp_list.append(tmp.left)
if tmp.right:
tmp_list.append(tmp.right)
ret.append(tmp_ret)
if tmp_list:
q.put(tmp_list)
return ret
def test():
assert Solution().level_order(build_tree_node([3, 9, 20, None, None, 15, 7])) == [[3], [9, 20], [15, 7]]
```
#### File: myleetcode/src/1041-RobotBoundedInCircle.py
```python
class Solution:
def isRobotBounded(self, instructions: str) -> bool:
"""
Runtime: 24 ms, faster than 96.05%
Memory Usage: 14.4 MB, less than 15.52%
1 <= instructions.length <= 100
        instructions[i] is 'G', 'L', or 'R'.
:param instructions:
:return:
"""
pos = (0, 0)
direction = 'N'
def executeInstructions():
nonlocal direction
nonlocal pos
for instruction in instructions:
if instruction == 'G':
if direction == 'N':
pos = (pos[0], pos[1] + 1)
elif direction == 'W':
pos = (pos[0] - 1, pos[1])
elif direction == 'S':
pos = (pos[0], pos[1] - 1)
else:
pos = (pos[0] + 1, pos[1])
elif instruction == 'L':
if direction == 'N':
direction = 'W'
elif direction == 'W':
direction = 'S'
elif direction == 'S':
direction = 'E'
elif direction == 'E':
direction = 'N'
elif instruction == 'R':
if direction == 'N':
direction = 'E'
elif direction == 'E':
direction = 'S'
elif direction == 'S':
direction = 'W'
elif direction == 'W':
direction = 'N'
return pos
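        # run the instruction cycle three times: if the robot is bounded, its distance from the
        # origin after three passes can never exceed its distance after one pass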
pos1 = executeInstructions()
executeInstructions()
pos2 = executeInstructions()
if (pos2[0] * pos2[0] + pos2[1] * pos2[1]) > (pos1[0] * pos1[0] + pos1[1] * pos1[1]):
return False
return True
def test():
assert Solution().isRobotBounded(instructions="GGLLGG")
assert not Solution().isRobotBounded(instructions="GG")
assert Solution().isRobotBounded(instructions="GL")
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1046-LastStoneWeight.py
```python
import heapq
from typing import List
class Solution:
def lastStoneWeight(self, stones: List[int]) -> int:
"""
70 / 70 test cases passed.
Status: Accepted
Runtime: 24 ms
Memory Usage: 14.4 MB
1 <= stones.length <= 30
1 <= stones[i] <= 1000
:param stones:
:return:
"""
if len(stones) == 1:
return stones[0]
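        # heapq is a min-heap, so negate the weights to pop the two heaviest stones first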
stones = [-x for x in stones]
heapq.heapify(stones)
while len(stones) > 1:
k1 = heapq.heappop(stones)
k2 = heapq.heappop(stones)
if k1 != k2:
heapq.heappush(stones, k1 - k2)
if len(stones) == 1:
return -stones[0]
else:
return 0
def test():
assert Solution().lastStoneWeight(stones=[2, 7, 4, 1, 8, 1]) == 1
assert Solution().lastStoneWeight(stones=[1]) == 1
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1089-DuplicateZeros.py
```python
from typing import List
class Solution:
def duplicateZeros(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
Just create a new array, and set the value to the old arr.
"""
arr_len = len(arr)
new_arr = []
for num in arr:
new_arr.append(num)
if num == 0:
new_arr.append(0)
if len(new_arr) == arr_len:
break
for i in range(arr_len):
arr[i] = new_arr[i]
def duplicateZeros2(self, arr: List[int]) -> None:
"""
Do not return anything, modify arr in-place instead.
In place the arr from the end when encounter zero.
"""
arr_len = len(arr)
i = 0
        # The last number doesn't need handling
while i < arr_len - 1:
if arr[i] == 0:
for j in range(arr_len - 1, i, -1):
arr[j] = arr[j - 1]
# jump over the duplicate zero
i += 2
else:
# move forward
i += 1
def test():
arr = [1, 0, 2, 3, 0, 4, 5, 0]
Solution().duplicateZeros(arr)
assert arr == [1, 0, 0, 2, 3, 0, 0, 4]
arr = [1, 2, 3]
Solution().duplicateZeros(arr)
assert arr == [1, 2, 3]
arr = [1, 0, 2, 3, 0, 4, 5, 0]
Solution().duplicateZeros2(arr)
assert arr == [1, 0, 0, 2, 3, 0, 0, 4]
arr = [1, 2, 3]
Solution().duplicateZeros2(arr)
assert arr == [1, 2, 3]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/116-PopulatingNextRightPointersinEachNode.py
```python
import node
from node import Node
from tool import print_results
class Solution:
@print_results
def connect(self, root: Node) -> Node:
"""
58 / 58 test cases passed.
Status: Accepted
Runtime: 56 ms
Memory Usage: 15.7 MB
:param root:
:return:
"""
if root is None or root.val is None:
return root
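        # a trailing None sentinel lets the loop below set the last node of each level to point at None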
parent_list = [root, None]
child_list = []
while len(parent_list) > 1:
for i in range(len(parent_list) - 1):
parent_list[i].next = parent_list[i + 1]
if parent_list[i].left is not None:
child_list.append(parent_list[i].left)
if parent_list[i].right is not None:
child_list.append(parent_list[i].right)
parent_list = child_list.copy()
parent_list.append(None)
child_list = []
return root
def test():
assert Solution().connect(root=node.build_node_without_next([1, 2, 3, 4, 5, 6, 7]))
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1189-MaximumNumberofBalloons.py
```python
import collections
class Solution:
def maxNumberOfBalloons(self, text: str) -> int:
"""
24 / 24 test cases passed.
Status: Accepted
Runtime: 28 ms
Memory Usage: 14.5 MB
balloon
:param text:
:return:
"""
cnt = collections.Counter(text)
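        # 'balloon' needs one b, a, n and two l's and o's; the scarcest letter bounds the count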
return min(cnt['b'], cnt['a'], cnt['l'] // 2, cnt['o'] // 2, cnt['n'])
def test():
assert Solution().maxNumberOfBalloons(text="nlaebolko") == 1
assert Solution().maxNumberOfBalloons(text="loonbalxballpoon") == 2
assert Solution().maxNumberOfBalloons(text="leetcode") == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/120-Triangle.py
```python
from typing import List
class Solution:
def minimumTotal(self, triangle: List[List[int]]) -> int:
"""
        Iterating the triangle bottom-up (a reversed iterator) would avoid the explicit reverse() call.
44 / 44 test cases passed.
Status: Accepted
Runtime: 64 ms
Memory Usage: 15.2 MB
:param triangle:
:return:
"""
h = len(triangle)
        # it is easier to sum over the reversed triangle
triangle.reverse()
dp = [triangle[0]]
for i in range(1, h):
tmp = []
for j in range(h - i):
tmp.append(triangle[i][j] + min(dp[i - 1][j], dp[i - 1][j + 1]))
dp.append(tmp)
return dp[-1][0]
def test():
assert Solution().minimumTotal(triangle=[[2], [3, 4], [6, 5, 7], [4, 1, 8, 3]]) == 11
assert Solution().minimumTotal(triangle=[[-10]]) == -10
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/123-BestTimetoBuyandSellStockIII.py
```python
import sys
from typing import List
class Solution:
def maxProfit(self, prices: List[int]) -> int:
"""
https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/discuss/39743/Python-DP-solution-120ms
Runtime: 1383 ms, faster than 44.21%
Memory Usage: 27.8 MB, less than 87.46%
1 <= prices.length <= 10**5
0 <= prices[i] <= 10**5
:param prices:
:return:
"""
min_price = prices[0]
max_profit = 0
profits = []
# first get the max profit if we only use 1 transaction
for price in prices:
min_price = min(price, min_price)
max_profit = max(max_profit, price - min_price)
profits.append(max_profit)
curr_max_price = 0
max_profit = 0
        # walking backwards: combine the best future sale with the best single-transaction profit earned before index i
for i in range(len(prices) - 1, 0, -1):
curr_max_price = max(curr_max_price, prices[i])
max_profit = max(max_profit, curr_max_price - prices[i] + profits[i])
return max_profit
def maxProfit2(self, prices: List[int]) -> int:
"""
https://leetcode.com/problems/best-time-to-buy-and-sell-stock-iii/discuss/39743/Python-DP-solution-120ms/301157
:param prices:
:return:
"""
one_buy = two_buy = sys.maxsize
one_profit = two_profit = 0
for p in prices:
one_buy = min(one_buy, p)
one_profit = max(one_profit, p - one_buy)
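            # treat the profit of the first transaction as a discount on the second buy price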
two_buy = min(two_buy, p - one_profit)
two_profit = max(two_profit, p - two_buy)
return two_profit
def test():
assert Solution().maxProfit2(prices=[1, 2, 4, 2, 5, 7, 2, 4, 9, 0]) == 13
assert Solution().maxProfit2(prices=[6, 1, 3, 2, 4, 7]) == 7
assert Solution().maxProfit2(prices=[3, 3, 5, 0, 0, 3, 1, 4]) == 6
assert Solution().maxProfit2(prices=[1, 2, 3, 4, 5]) == 4
assert Solution().maxProfit2(prices=[7, 6, 4, 3, 1]) == 0
assert Solution().maxProfit(prices=[1, 2, 4, 2, 5, 7, 2, 4, 9, 0]) == 13
assert Solution().maxProfit(prices=[6, 1, 3, 2, 4, 7]) == 7
assert Solution().maxProfit(prices=[3, 3, 5, 0, 0, 3, 1, 4]) == 6
assert Solution().maxProfit(prices=[1, 2, 3, 4, 5]) == 4
assert Solution().maxProfit(prices=[7, 6, 4, 3, 1]) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1275-FindWinneronaTicTacToeGame.py
```python
from typing import List
class Solution:
def tictactoe(self, moves: List[List[int]]) -> str:
"""
100 / 100 test cases passed.
Status: Accepted
Runtime: 42 ms
Memory Usage: 14.3 MB
:param moves:
:return:
"""
if len(moves) < 5:
return 'Pending'
matrix = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for x in range(0, len(moves), 2):
matrix[moves[x][0]][moves[x][1]] = 1
for x in range(1, len(moves), 2):
matrix[moves[x][0]][moves[x][1]] = 2
if matrix[0][0] == matrix[1][1] == matrix[2][2]:
if matrix[0][0] == 1:
return 'A'
elif matrix[0][0] == 2:
return 'B'
if matrix[0][2] == matrix[1][1] == matrix[2][0]:
if matrix[0][2] == 1:
return 'A'
elif matrix[0][2] == 2:
return 'B'
for i in [0, 1, 2]:
if matrix[i][0] == matrix[i][1] == matrix[i][2]:
if matrix[i][0] == 1:
return 'A'
elif matrix[i][0] == 2:
return 'B'
for i in [0, 1, 2]:
if matrix[0][i] == matrix[1][i] == matrix[2][i]:
if matrix[0][i] == 1:
return 'A'
elif matrix[0][i] == 2:
return 'B'
return 'Pending' if len(moves) < 9 else 'Draw'
def test():
assert Solution().tictactoe(moves=[[2, 0], [1, 1], [0, 2], [2, 1], [1, 2], [1, 0], [0, 0], [0, 1]]) == 'B'
assert Solution().tictactoe(moves=[[0, 0], [2, 0], [1, 1], [2, 1], [2, 2]]) == 'A'
assert Solution().tictactoe(moves=[[0, 0], [1, 1], [0, 1], [0, 2], [1, 0], [2, 0]]) == 'B'
assert Solution().tictactoe(
moves=[[0, 0], [1, 1], [2, 0], [1, 0], [1, 2], [2, 1], [0, 1], [0, 2], [2, 2]]) == 'Draw'
assert Solution().tictactoe(moves=[[0, 0], [1, 1]]) == 'Pending'
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1314-MatrixBlockSum.py
```python
from typing import List
class Solution:
def matrixBlockSum(self, mat: List[List[int]], k: int) -> List[List[int]]:
"""
12 / 12 test cases passed.
Status: Accepted
Runtime: 104 ms
Memory Usage: 15.2 MB
:param mat:
:param k:
:return:
"""
m, n = len(mat), len(mat[0])
ret = [[0 for _ in range(n)] for _ in range(m)]
dp = [[0 for _ in range(n)] for _ in range(m)]
# dp accumulate from [0][0] to [i][j]
dp[0][0] = mat[0][0]
for i in range(1, m):
dp[i][0] = mat[i][0] + dp[i - 1][0]
for j in range(1, n):
dp[0][j] = mat[0][j] + dp[0][j - 1]
for i in range(1, m):
for j in range(1, n):
dp[i][j] = mat[i][j] + dp[i - 1][j] + dp[i][j - 1] - dp[i - 1][j - 1]
for i in range(m):
for j in range(n):
max_r = min(i + k, m - 1)
max_c = min(j + k, n - 1)
ret[i][j] = dp[max_r][max_c]
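                # inclusion-exclusion on the prefix sums: subtract the strip above and the strip to the left, add back the doubly subtracted corner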
if i - k > 0:
ret[i][j] -= dp[i - k - 1][max_c]
if j - k > 0:
ret[i][j] -= dp[max_r][j - k - 1]
if i - k > 0 and j - k > 0:
ret[i][j] += dp[i - k - 1][j - k - 1]
return ret
def test():
assert Solution().matrixBlockSum(mat=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], k=1) == [[12, 21, 16], [27, 45, 33],
[24, 39, 28]]
assert Solution().matrixBlockSum(mat=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], k=2) == [[45, 45, 45], [45, 45, 45],
[45, 45, 45]]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1346-CheckIfNandItsDoubleExist.py
```python
from typing import List
class Solution:
def checkIfExist(self, arr: List[int]) -> bool:
for i in range(len(arr)):
for j in range(len(arr)):
if arr[j] == arr[i] * 2 and j != i:
return True
return False
def test():
assert Solution().checkIfExist([10, 2, 5, 3])
assert Solution().checkIfExist([7, 1, 14, 11])
assert not Solution().checkIfExist([3, 1, 7, 11])
assert not Solution().checkIfExist([-2, 0, 10, -19, 4, 6, -8])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/136-SingleNumber.py
```python
from typing import List
class Solution:
def singleNumber(self, nums: List[int]) -> int:
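        # every duplicated value appears exactly twice, so 2*sum(unique) - sum(all) isolates the single one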
return 2 * sum(set(nums)) - sum(nums)
def singleNumber2(self, nums: List[int]) -> int:
a = 0
for n in nums:
a ^= n
return a
def test():
assert Solution().singleNumber(nums=[2, 2, 1]) == 1
assert Solution().singleNumber(nums=[4, 1, 2, 1, 2]) == 4
assert Solution().singleNumber(nums=[1]) == 1
assert Solution().singleNumber2(nums=[2, 2, 1]) == 1
assert Solution().singleNumber2(nums=[4, 1, 2, 1, 2]) == 4
assert Solution().singleNumber2(nums=[1]) == 1
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/143-ReorderList.py
```python
from typing import Optional
from src.list_node import ListNode, buildListNode
class Solution:
def reorderList(self, head: Optional[ListNode]) -> None:
"""
Runtime: 96 ms, faster than 53.19%
Memory Usage: 23.2 MB, less than 94.94%
Do not return anything, modify head in-place instead.
The number of nodes in the list is in the range [1, 5 * 10^4].
1 <= Node.val <= 1000
"""
nums = []
node = head
while node:
nums.append(node.val)
node = node.next
node = head.next
index = 1
flag = 1
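        # alternate between the index-th value from the back (flag = -1) and from the front (flag = 1)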
while index <= len(nums) / 2 and node:
flag *= -1
node.val = nums[index * flag]
node = node.next
if flag == 1:
index += 1
def test():
test_case = buildListNode([1])
Solution().reorderList(test_case)
assert test_case == buildListNode([1])
test_case = buildListNode([1, 2])
Solution().reorderList(test_case)
assert test_case == buildListNode([1, 2])
test_case = buildListNode([1, 2, 3])
Solution().reorderList(test_case)
assert test_case == buildListNode([1, 3, 2])
test_case = buildListNode([1, 2, 3, 4])
Solution().reorderList(test_case)
assert test_case == buildListNode([1, 4, 2, 3])
test_case = buildListNode([1, 2, 3, 4, 5])
Solution().reorderList(test_case)
assert test_case == buildListNode([1, 5, 2, 4, 3])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1448-CountGoodNodesinBinaryTree.py
```python
import collections
from tree_node import *
class Solution:
def goodNodes(self, root: TreeNode) -> int:
"""
63 / 63 test cases passed.
Status: Accepted
Runtime: 244 ms
Memory Usage: 31.5 MB
:param root:
:return:
"""
good_nodes = 1
dq = collections.deque()
dq.append(root)
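        # reuse each node's val to carry the maximum value seen on the path from the root down to it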
while len(dq) > 0:
node = dq.pop()
if node.left:
if node.val <= node.left.val:
good_nodes += 1
else:
node.left.val = node.val
dq.append(node.left)
if node.right:
if node.val <= node.right.val:
good_nodes += 1
else:
node.right.val = node.val
dq.append(node.right)
return good_nodes
def test():
null = None
assert Solution().goodNodes(build_tree_node([3, 1, 4, 3, null, 1, 5])) == 4
assert Solution().goodNodes(build_tree_node([3, 3, null, 4, 2])) == 3
assert Solution().goodNodes(build_tree_node([1])) == 1
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/14-LongestCommonPrefix.py
```python
class Solution:
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if not strs:
return ""
if len(strs) == 1:
return strs[0]
str1 = strs[0]
longest = ""
for c in str1:
longest += c
for s in strs[1:]:
if not s.startswith(longest):
return longest[:-1]
return longest
def test():
assert Solution().longestCommonPrefix([]) == ""
assert Solution().longestCommonPrefix(["flower", "flow", "flight"]) == "fl"
assert Solution().longestCommonPrefix(["dog", "racecar", "car"]) == ""
assert Solution().longestCommonPrefix(["dog", "dog", "dog"]) == "dog"
```
#### File: myleetcode/src/152-MaximumProductSubarray.py
```python
from typing import List
class Solution:
def maxProduct(self, nums: List[int]) -> int:
"""
Runtime: 60 ms, faster than 43.10%
Memory Usage: 14.1 MB, less than 95.16%
1 <= nums.length <= 2 * 10^4
-10 <= nums[i] <= 10
The product of any prefix or suffix of nums is guaranteed to fit in a 32-bit integer.
:param nums:
:return:
"""
pre_dp = [nums[0], nums[0]]
ret = nums[0]
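        # keep both the largest and the smallest product ending at the previous index; a negative value can swap their roles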
for num in nums[1:]:
cur_dp = [max(num, pre_dp[0] * num, pre_dp[1] * num), min(num, pre_dp[0] * num, pre_dp[1] * num)]
pre_dp[0] = cur_dp[0]
pre_dp[1] = cur_dp[1]
ret = max(ret, cur_dp[0])
return ret
def test():
assert Solution().maxProduct(nums=[2, 3, -2, 4]) == 6
assert Solution().maxProduct(nums=[2, 3, -2, 4, -1]) == 48
assert Solution().maxProduct(nums=[-2, 0, -1]) == 0
assert Solution().maxProduct(nums=[-2]) == -2
assert Solution().maxProduct(nums=[-2, 1]) == 1
assert Solution().maxProduct(nums=[-2, 1, -1]) == 2
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/153-FindMinimuminRotatedSortedArray.py
```python
from typing import List
class Solution:
def findMin(self, nums: List[int]) -> int:
"""
150 / 150 test cases passed.
Status: Accepted
Runtime: 44 ms
Memory Usage: 14.7 MB
:param nums:
:return:
"""
if len(nums) == 1:
return nums[0]
if len(nums) == 2:
return min(nums[0], nums[1])
mid = (len(nums) - 1) // 2
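        # in a rotated array the minimum is the only element smaller than both of its neighbours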
if nums[mid - 1] > nums[mid] < nums[mid + 1]:
return nums[mid]
if nums[mid - 1] < nums[mid] > nums[mid + 1]:
return nums[mid + 1]
if nums[mid] >= nums[-1]:
return self.findMin(nums[mid + 1:])
else:
return self.findMin(nums[:mid])
def test():
assert Solution().findMin(nums=[3]) == 3
assert Solution().findMin(nums=[3, 4, 5]) == 3
assert Solution().findMin(nums=[3, 4, 5, 0]) == 0
assert Solution().findMin(nums=[6, 3, 4, 5]) == 3
assert Solution().findMin(nums=[6, 7, 3, 4, 5]) == 3
assert Solution().findMin(nums=[3, 4, 5, 1, 2]) == 1
assert Solution().findMin(nums=[4, 5, 6, 7, 0, 1, 2]) == 0
assert Solution().findMin(nums=[11, 13, 15, 17]) == 11
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1629-SlowestKey.py
```python
from typing import List
class Solution:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
"""
105 / 105 test cases passed.
Status: Accepted
Runtime: 48 ms
Memory Usage: 14.3 MB
:param releaseTimes:
:param keysPressed:
:return:
"""
tmp_max = releaseTimes[0]
ret = keysPressed[0]
for i in range(1, len(releaseTimes)):
t = releaseTimes[i] - releaseTimes[i - 1]
if t > tmp_max:
tmp_max = t
ret = keysPressed[i]
if t == tmp_max:
ret = max(ret, keysPressed[i])
return ret
def test():
assert Solution().slowestKey(releaseTimes=[9, 29, 49, 50], keysPressed="cbcd") == 'c'
assert Solution().slowestKey(releaseTimes=[50, 9, 29, 49, 50], keysPressed="zcbcd") == 'z'
assert Solution().slowestKey(releaseTimes=[12, 23, 36, 46, 62], keysPressed="spuda") == 'a'
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/168-ExcelSheetColumnTitle.py
```python
class Solution:
def convertToTitle(self, columnNumber: int) -> str:
"""
18 / 18 test cases passed.
Status: Accepted
Runtime: 16 ms
Memory Usage: 14.2 MB
:param columnNumber:
:return:
"""
ret = ''
while columnNumber > 0:
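            # bijective base-26: shift to 0-based so a remainder of 25 maps to 'Z' instead of rolling over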
columnNumber -= 1
columnNumber, l = divmod(columnNumber, 26)
ret = chr(l + 65) + ret
return ret
def test():
assert Solution().convertToTitle(columnNumber=1) == 'A'
assert Solution().convertToTitle(columnNumber=28) == 'AB'
assert Solution().convertToTitle(columnNumber=52) == 'AZ'
assert Solution().convertToTitle(columnNumber=701) == 'ZY'
assert Solution().convertToTitle(columnNumber=2147483647) == 'FXSHRXW'
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/190-ReverseBits.py
```python
from tool import print_results
class Solution:
@print_results
def reverseBits(self, n: int) -> int:
"""
600 / 600 test cases passed.
Status: Accepted
Runtime: 20 ms
Memory Usage: 14.3 MB
:param n:
:return:
"""
reversed_n = format(n, 'b')[::-1]
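        # right-pad to 32 bits: the input's high-order zero bits become the low-order bits of the reversed result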
reversed_n += '0' * (32 - len(reversed_n))
return int(reversed_n, 2)
def test():
assert Solution().reverseBits(n=43261596) == 964176192
assert Solution().reverseBits(n=0b00000010100101000001111010011100) == 964176192
assert Solution().reverseBits(n=0b11111111111111111111111111111101) == 3221225471
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1952-ThreeDivisors.py
```python
class Solution:
def isThree(self, n: int) -> bool:
cnt = 0
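        # count divisors strictly between 1 and n; exactly one such divisor means n has exactly three divisors overall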
for i in range(2, int(n / 2) + 1):
if n % i == 0:
cnt += 1
return cnt == 1
def test():
assert not Solution().isThree(n=2)
assert not Solution().isThree(n=8)
assert Solution().isThree(n=4)
assert Solution().isThree(n=9)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1991-FindtheMiddleIndexinArray.py
```python
from typing import List
class Solution:
def findMiddleIndex(self, nums: List[int]) -> int:
if len(nums) == 1:
return 0
right_sum = sum(nums) - nums[0]
left_sum = 0
if right_sum == left_sum:
return 0
for i in range(1, len(nums)):
right_sum -= nums[i]
left_sum += nums[i - 1]
if left_sum == right_sum:
return i
return -1
def test():
assert Solution().findMiddleIndex(nums=[2, 3, -1, 8, 4]) == 3
assert Solution().findMiddleIndex(nums=[1, -1, 4]) == 2
assert Solution().findMiddleIndex(nums=[2, 5]) == -1
assert Solution().findMiddleIndex(nums=[1]) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/1995-CountSpecialQuadruplets.py
```python
from typing import List
class Solution:
def countQuadruplets(self, nums: List[int]) -> int:
ret = 0
for i in range(len(nums) - 3):
for j in range(i + 1, len(nums) - 2):
for k in range(j + 1, len(nums) - 1):
for l in range(k + 1, len(nums)):
if nums[i] + nums[j] + nums[k] == nums[l]:
ret += 1
return ret
def test():
assert Solution().countQuadruplets([9, 6, 8, 23, 39, 23]) == 2
assert Solution().countQuadruplets(nums=[1, 2, 3, 6]) == 1
assert Solution().countQuadruplets(nums=[1, 1, 1, 3, 5]) == 4
assert Solution().countQuadruplets(nums=[3, 3, 6, 4, 5]) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/202-HappyNumber.py
```python
class Solution:
def isHappy(self, n: int) -> bool:
"""
402 / 402 test cases passed.
Status: Accepted
Runtime: 36 ms
Memory Usage: 14 MB
:param n:
:return:
"""
if n == 1:
return True
def replace(num) -> int:
i, j = divmod(num, 10)
ret = j * j
while i >= 10:
i, j = divmod(i, 10)
ret += j * j
ret += i * i
return ret
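        # remember every value seen; revisiting one means the sequence is cycling and will never reach 1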
circle = []
while n not in circle:
circle.append(n)
n = replace(n)
if n == 1:
return True
return False
def test():
assert Solution().isHappy(n=19)
assert Solution().isHappy(n=1)
assert not Solution().isHappy(n=2)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/2037-MinimumNumberofMovestoSeatEveryone.py
```python
from typing import List
class Solution:
def minMovesToSeat(self, seats: List[int], students: List[int]) -> int:
"""
n == seats.length == students.length
1 <= n <= 100
1 <= seats[i], students[j] <= 100
:param seats:
:param students:
:return:
"""
ret = 0
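        # pairing the i-th smallest seat with the i-th smallest student minimizes the total moves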
for seat, student in zip(sorted(seats), sorted(students)):
ret += abs(seat - student)
return ret
def test():
assert Solution().minMovesToSeat(seats=[3, 1, 5], students=[2, 7, 4]) == 4
assert Solution().minMovesToSeat(seats=[2, 2, 6, 6], students=[1, 3, 2, 6]) == 4
assert Solution().minMovesToSeat(seats=[4, 1, 5, 9], students=[1, 3, 2, 6]) == 7
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/2094-Finding-3-DigitEvenNumbers.py
```python
from typing import List
class Solution:
def findEvenNumbers(self, digits: List[int]) -> List[int]:
"""
3 <= digits.length <= 100
0 <= digits[i] <= 9
:param digits:
:return:
"""
ret_set = set()
for i in range(len(digits)):
if digits[i] % 2 == 0:
for j in range(len(digits)):
if j == i or digits[j] == 0:
continue
for k in range(len(digits)):
if k == i or k == j:
continue
ret_set.add(digits[j] * 100 + digits[k] * 10 + digits[i])
else:
continue
return sorted(list(ret_set))
def test():
assert Solution().findEvenNumbers(digits=[2, 1, 3, 0]) == [102, 120, 130, 132, 210, 230, 302, 310, 312, 320]
assert Solution().findEvenNumbers(digits=[2, 2, 8, 8, 2]) == [222, 228, 282, 288, 822, 828, 882]
assert Solution().findEvenNumbers(digits=[3, 7, 5]) == []
assert Solution().findEvenNumbers(digits=[0, 2, 0, 0]) == [200]
assert Solution().findEvenNumbers(digits=[0, 0, 0]) == []
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/2099-FindSubsequenceofLengthKWiththeLargestSum.py
```python
from typing import List
from src import tool
class Solution:
def maxSubsequence(self, nums: List[int], k: int) -> List[int]:
"""
1 <= nums.length <= 1000
-10^5 <= nums[i] <= 10^5
1 <= k <= nums.length
:param nums:
:param k:
:return:
"""
if k == len(nums):
return nums
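        # remove the len(nums) - k smallest values; whatever remains keeps its original relative order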
smallest = sorted(nums)[:len(nums) - k]
for num in smallest:
nums.pop(nums.index(num))
return nums
def test():
assert Solution().maxSubsequence(nums=[2, 1, 3, 3], k=2) == [3, 3]
assert Solution().maxSubsequence(nums=[-1, -2, 3, 4], k=3) == [-1, 3, 4]
    assert sorted(Solution().maxSubsequence(nums=[3, 4, 3, 3], k=2)) == [3, 4]  # the kept values may appear in a different order
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/213-HouseRobberII.py
```python
from typing import List
class Solution:
def rob(self, nums: List[int]) -> int:
"""
75 / 75 test cases passed.
Status: Accepted
Runtime: 20 ms
Memory Usage: 14.3 MB
:param nums:
:return:
"""
if len(nums) <= 3:
return max(nums)
def rob2(nums: List[int]) -> int:
dp = [0] * len(nums)
dp[0] = nums[0]
for i in range(1, len(nums)):
dp[i] = max(nums[i] + dp[i - 2], dp[i - 1])
return dp[-1]
# we rob twice without the first and the last separately, and choose the max one
return max(rob2(nums[1:]), rob2(nums[:-1]))
def test():
assert Solution().rob(nums=[4, 1, 2, 8, 1]) == 12
assert Solution().rob(nums=[4, 1, 2, 8]) == 9
assert Solution().rob(nums=[2, 3, 2]) == 3
assert Solution().rob(nums=[1, 2, 3, 1]) == 4
assert Solution().rob(nums=[1, 2, 3]) == 3
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/221-MaximalSquare.py
```python
from typing import List
class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
"""
Runtime: 1936 ms, faster than 5.03%
Memory Usage: 15.5 MB, less than 75.88%
m == matrix.length
n == matrix[i].length
1 <= m, n <= 300
matrix[i][j] is '0' or '1'.
:param matrix:
:return:
"""
m, n = len(matrix), len(matrix[0])
ret = 0
for i in range(m):
for j in range(n):
if matrix[i][j] == '1':
k = 1
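                    # grow the square side k while the newly added right column, bottom row and corner cell are all '1'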
ex = False
while not ex:
for r in range(k):
if j + k >= n or i + r >= m or matrix[i + r][j + k] == '0':
ex = True
break
if j + r >= n or i + k >= m or matrix[i + k][j + r] == '0':
ex = True
break
if matrix[i + k][j + k] == '0':
ex = True
break
k += 1
ret = max(ret, (k - 1) ** 2)
return ret
def test():
assert Solution().maximalSquare(
matrix=[["0", "1", "1", "0", "0", "1", "0", "1", "0", "1"],
["0", "0", "1", "0", "1", "0", "1", "0", "1", "0"],
["1", "0", "0", "0", "0", "1", "0", "1", "1", "0"],
["0", "1", "1", "1", "1", "1", "1", "0", "1", "0"],
["0", "0", "1", "1", "1", "1", "1", "1", "1", "0"],
["1", "1", "0", "1", "0", "1", "1", "1", "1", "0"],
["0", "0", "0", "1", "1", "0", "0", "0", "1", "0"],
["1", "1", "0", "1", "1", "0", "0", "1", "1", "1"],
["0", "1", "0", "1", "1", "0", "1", "0", "1", "1"]]) == 4
assert Solution().maximalSquare(matrix=[["0", "1"], ["1", "0"]]) == 1
assert Solution().maximalSquare(matrix=[["1", "1"], ["1", "1"]]) == 4
assert Solution().maximalSquare(matrix=[["0"]]) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/222-CountCompleteTreeNodes.py
```python
import collections
from typing import Optional
from tree_node import TreeNode, build_tree_node
class Solution:
def countNodes(self, root: Optional[TreeNode]) -> int:
"""
The number of nodes in the tree is in the range [0, 5 * 10**4].
0 <= Node.val <= 5 * 10**4
The tree is guaranteed to be complete.
:param root:
:return:
"""
if root is None:
return 0
dq = collections.deque()
dq.append(root)
ret = 0
while len(dq) > 0:
node = dq.pop()
ret += 1
if node.left is not None:
dq.append(node.left)
if node.right is not None:
dq.append(node.right)
return ret
def countNodes2(self, root: Optional[TreeNode]) -> int:
"""
Runtime: 90 ms, faster than 49.51%
Memory Usage: 21.6 MB, less than 57.82%
Reference: https://leetcode.com/problems/count-complete-tree-nodes/discuss/62088/My-python-solution-in-O(lgn-*-lgn)-time
The number of nodes in the tree is in the range [0, 5 * 10**4].
0 <= Node.val <= 5 * 10**4
The tree is guaranteed to be complete.
:param root:
:return:
"""
def get_tree_height(tree):
if tree is None:
return 0
return 1 + get_tree_height(tree.left)
if root is None:
return 0
left_height = get_tree_height(root.left)
right_height = get_tree_height(root.right)
if left_height == right_height:
# left sub tree must be a full binary tree
            return 2 ** left_height + self.countNodes2(root.right)
else:
# right sub tree must be a full binary tree
            return 2 ** right_height + self.countNodes2(root.left)
def test():
assert Solution().countNodes2(root=build_tree_node([])) == 0
assert Solution().countNodes2(root=build_tree_node([1])) == 1
assert Solution().countNodes2(root=build_tree_node([1, 2])) == 2
assert Solution().countNodes2(root=build_tree_node([1, 2, 3])) == 3
assert Solution().countNodes2(root=build_tree_node([1, 2, 3, 4])) == 4
assert Solution().countNodes2(root=build_tree_node([1, 2, 3, 4, 5])) == 5
assert Solution().countNodes2(root=build_tree_node([1, 2, 3, 4, 5, 6])) == 6
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/22-GenerateParentheses.py
```python
from typing import List
class Solution:
def generateParenthesis(self, n: int) -> List[str]:
"""
8 / 8 test cases passed.
Status: Accepted
Runtime: 36 ms
Memory Usage: 14.7 MB
:param n:
:return:
"""
if n == 1:
return ["()"]
ret = self.generateParenthesis(n - 1)
ret_set = set()
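        # extend every valid combination of n-1 pairs by prepending '()', appending '()', or inserting '()' right after any '('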
for p in ret:
ret_set.add(f'(){p}')
ret_set.add(f'{p}()')
for i in range(len(p)):
if p[i] == '(':
ret_set.add(f'{p[:i + 1]}(){p[i + 1:]}')
return list(ret_set)
def test():
assert Solution().generateParenthesis(n=1) == ["()"]
ans = ["()()", "(())"]
ret = Solution().generateParenthesis(n=2)
assert len(ret) == len(ans)
for a in ans:
assert a in ret
ans = ["((()))", "(()())", "(())()", "()(())", "()()()"]
ret = Solution().generateParenthesis(n=3)
assert len(ret) == len(ans)
for a in ans:
assert a in ret
ans = ["(((())))", "((()()))", "((())())", "((()))()", "(()(()))", "(()()())", "(()())()", "(())(())", "(())()()",
"()((()))", "()(()())", "()(())()", "()()(())", "()()()()"]
ret = Solution().generateParenthesis(n=4)
assert len(ret) == len(ans)
for a in ans:
assert a in ret
ret = Solution().generateParenthesis(n=8)
print(ret)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/237-DeleteNodeinaLinkedList.py
```python
from list_node import ListNode, buildListNode
class Solution:
def deleteNode(self, node: ListNode):
"""
41 / 41 test cases passed.
Status: Accepted
Runtime: 40 ms
Memory Usage: 14.9 MB
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next
def test():
# No test case applied.
pass
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/24-SwapNodesInPairs.py
```python
from list_node import ListNode, buildListNode
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
        dummy = tmp = ListNode(0)  # dummy head node instead of abusing `self` as a sentinel
        tmp.next = head
while tmp.next and tmp.next.next:
left = tmp.next
right = left.next
tmp.next, right.next, left.next = right, left, right.next
# tmp.next = right
# right.next = left
# left.next = right.next
tmp = left
        return dummy.next
def test():
assert Solution().swapPairs(buildListNode([1, 2, 3, 4])) == buildListNode([2, 1, 4, 3])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/268-MissingNumber.py
```python
from typing import List
class Solution:
def missingNumber(self, nums: List[int]) -> int:
"""
122 / 122 test cases passed.
Status: Accepted
Runtime: 128 ms
Memory Usage: 15.2 MB
:param nums:
:return:
"""
return len(nums) * (len(nums) + 1) // 2 - sum(nums)
def test():
assert Solution().missingNumber(nums=[3, 0, 1]) == 2
assert Solution().missingNumber(nums=[0, 1]) == 2
assert Solution().missingNumber(nums=[9, 6, 4, 2, 3, 5, 7, 0, 1]) == 8
assert Solution().missingNumber(nums=[0]) == 1
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/283-MoveZeroes.py
```python
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
l = len(nums) - 1
i = 0
while i < l:
if nums[i] == 0:
l -= 1
nums[i:] = nums[i + 1:] + [0]
else:
i += 1
def moveZeroes2(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
zeros = 0
for i in range(len(nums)):
if zeros > 0:
nums[i - zeros] = nums[i]
if nums[i] == 0:
zeros += 1
if zeros > 0:
nums[-zeros:] = [0] * zeros
def test():
nums = [0, 1, 0, 3, 12]
Solution().moveZeroes(nums)
assert nums == [1, 3, 12, 0, 0]
nums = [0, 0, 1]
Solution().moveZeroes(nums)
assert nums == [1, 0, 0]
nums = [0, 1, 0, 3, 12]
Solution().moveZeroes2(nums)
assert nums == [1, 3, 12, 0, 0]
nums = [0, 0, 1]
Solution().moveZeroes2(nums)
assert nums == [1, 0, 0]
nums = [1]
Solution().moveZeroes2(nums)
assert nums == [1]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/28-ImplementStrstr.py
```python
class Solution:
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if not needle or haystack == needle:
return 0
len_n = len(needle)
for i in range(len(haystack) - len_n + 1):
if haystack[i:i + len_n] == needle:
return i
return -1
def test():
assert Solution().strStr("test", "") == 0
assert Solution().strStr("hello", "ll") == 2
assert Solution().strStr("hello", "k") == -1
assert Solution().strStr("hello", "hello") == 0
assert Solution().strStr("mississippi", "pi") == 9
```
#### File: myleetcode/src/290-WordPattern.py
```python
class Solution:
def wordPattern(self, pattern: str, s: str) -> bool:
"""
Runtime: 38 ms, faster than 32.55%
Memory Usage: 14.2 MB, less than 55.31%
1 <= pattern.length <= 300
pattern contains only lower-case English letters.
1 <= s.length <= 3000
s contains only lowercase English letters and spaces ' '.
s does not contain any leading or trailing spaces.
All the words in s are separated by a single space.
:param pattern:
:param s:
:return:
"""
values = s.split()
if len(pattern) != len(values):
return False
d = dict()
d2 = dict()
for i in range(len(pattern)):
p = pattern[i]
v = values[i]
if p in d and v != d.get(p):
return False
elif p not in d:
d[p] = v
if v in d2 and p != d2.get(v):
return False
elif v not in d2:
d2[v] = p
return True
def test():
assert Solution().wordPattern(pattern="abba", s="dog cat cat dog")
assert not Solution().wordPattern(pattern="abba", s="dog dog dog dog")
assert not Solution().wordPattern(pattern="abba", s="dog cat cat fish")
assert not Solution().wordPattern(pattern="aaaa", s="dog cat cat dog")
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/2-AddTwoNumbers.py
```python
from typing import Optional
from list_node import *
class Solution:
def addTwoNumbers2(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
"""
Updated at 2021-12-06
1568 / 1568 test cases passed.
Runtime: 68 ms, faster than 77.74%
Memory Usage: 14.2 MB, less than 91.26%
The number of nodes in each linked list is in the range [1, 100].
0 <= Node.val <= 9
It is guaranteed that the list represents a number that does not have leading zeros.
:param l1:
:param l2:
:return:
"""
node1, node2 = l1, l2
flag = 0
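        # flag is the carry between digit positions; the result is written in place into l1,
        # splicing in the remainder of l2 when l2 is longer and appending one node for a final carry.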
while node1 is not None or node2 is not None or flag > 0:
if node1 is None and node2 is None:
last_node1.next = ListNode(flag)
break
if node1 is None:
node1 = node2
last_node1.next = node1
node2 = None
node2val = node2.val if node2 is not None else 0
ret = flag + node1.val + node2val
flag, node1.val = divmod(ret, 10)
last_node1 = node1
node1 = node1.next
if node2 is not None:
node2 = node2.next
return l1
def add_two_numbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
add_one = False
l3 = tmp = ListNode(0)
while l1 or l2:
result = 0
if l1:
result += l1.val
l1 = l1.next
if l2:
result += l2.val
l2 = l2.next
if add_one:
result += 1
add_one = False
if result > 9:
add_one = True
tmp.next = ListNode(result % 10)
tmp = tmp.next
if add_one:
tmp.next = ListNode(1)
return l3.next
def test():
assert Solution().add_two_numbers(
buildListNode([2, 4, 3]),
buildListNode([5, 6, 4])
) == buildListNode([7, 0, 8])
assert Solution().addTwoNumbers2(
l1=buildListNode([2, 4, 3]),
l2=buildListNode([5, 6, 4])
) == buildListNode([7, 0, 8])
assert Solution().addTwoNumbers2(
l1=buildListNode([0]),
l2=buildListNode([0])
) == buildListNode([0])
assert Solution().addTwoNumbers2(
l1=buildListNode([9, 9, 9, 9, 9, 9, 9]),
l2=buildListNode([9, 9, 9, 9])
) == buildListNode([8, 9, 9, 9, 0, 0, 0, 1])
assert Solution().addTwoNumbers2(
l1=buildListNode([5]),
l2=buildListNode([5])
) == buildListNode([0, 1])
assert Solution().addTwoNumbers2(
l1=buildListNode([0]),
l2=buildListNode([1])
) == buildListNode([1])
assert Solution().addTwoNumbers2(
l1=buildListNode([9, 9, 9, 9, 9, 9, 9]),
l2=buildListNode([1])
) == buildListNode([0, 0, 0, 0, 0, 0, 0, 1])
assert Solution().addTwoNumbers2(
l2=buildListNode([9, 9, 9, 9, 9, 9, 9]),
l1=buildListNode([1])
) == buildListNode([0, 0, 0, 0, 0, 0, 0, 1])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/303-RangeSumQuery-Immutable.py
```python
import collections
import itertools
from typing import List
class NumArray:
"""
15 / 15 test cases passed.
Status: Accepted
Runtime: 1100 ms
Memory Usage: 17.7 MB
"""
def __init__(self, nums: List[int]):
self.nums = nums
def sumRange(self, left: int, right: int) -> int:
return sum(self.nums[left:right + 1])
class NumArray2:
"""
"""
def __init__(self, nums: List[int]):
self.accum = list(itertools.accumulate(nums, initial=0))
def sumRange(self, left: int, right: int) -> int:
return self.accum[right + 1] - self.accum[left]
def test():
na = NumArray(nums=[-2, 0, 3, -5, 2, -1])
assert na.sumRange(0, 2) == 1
assert na.sumRange(2, 5) == -1
assert na.sumRange(0, 5) == -3
na = NumArray2(nums=[-2, 0, 3, -5, 2, -1])
assert na.sumRange(0, 2) == 1
assert na.sumRange(2, 5) == -1
assert na.sumRange(0, 5) == -3
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/334-IncreasingTripletSubsequence.py
```python
from typing import List
class Solution:
def increasingTriplet(self, nums: List[int]) -> bool:
"""
76 / 76 test cases passed.
Status: Accepted
Runtime: 695 ms
Memory Usage: 25.5 MB
:param nums:
:return:
"""
n = len(nums)
if n < 3:
return False
        # Initialised to 2**31, above any possible value under the problem constraints.
        # flag_num is the smallest nums[j] for which some earlier nums[i] < nums[j] exists.
flag_num = 2 ** 31
min_value = nums[0]
for i in range(1, n):
if nums[i] > flag_num:
return True
if nums[i] > min_value:
flag_num = min(nums[i], flag_num)
min_value = min(min_value, nums[i])
return False
def test():
assert Solution().increasingTriplet(nums=[1, 2, 3, 4, 5])
assert not Solution().increasingTriplet(nums=[5, 4, 3, 2, 1])
assert Solution().increasingTriplet(nums=[2, 1, 5, 0, 4, 6])
assert Solution().increasingTriplet(nums=[2, 1, 5, 0, 6])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/33-SearchinRotatedSortedArray.py
```python
from typing import List
class Solution:
def search(self, nums: List[int], target: int) -> int:
"""
195 / 195 test cases passed.
Status: Accepted
Runtime: 60 ms
Memory Usage: 14.7 MB
:param nums:
:param target:
:return:
"""
l, h = 0, len(nums) - 1
while l <= h:
mid = l + (h - l) // 2
if nums[mid] == target:
return mid
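            # In a rotated sorted array at least one of [l, mid] and [mid, h] is sorted;
            # comparing nums[l] with nums[mid] tells which, and that decides which half can hold target.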
if nums[mid] > target:
# ordered
if nums[l] <= nums[mid]:
if nums[l] > target:
# might be in nums[mid + 1, h]
l = mid + 1
else:
# might be in nums[l, mid - 1]
h = mid - 1
else:
# not ordered
h = mid - 1
else:
if nums[l] > nums[mid]:
if nums[l] > target:
l = mid + 1
else:
h = mid - 1
else:
l = mid + 1
return -1
def test():
assert Solution().search(nums=[1, 3], target=3) == 1
assert Solution().search(nums=[1, 3], target=1) == 0
assert Solution().search(nums=[3, 1], target=1) == 1
assert Solution().search(nums=[3, 1], target=3) == 0
assert Solution().search(nums=[4, 5, 6, 7, 0, 1, 2], target=0) == 4
assert Solution().search(nums=[4, 5, 6, 7, 0, 1, 2], target=4) == 0
assert Solution().search(nums=[4, 5, 6, 7, 0, 1, 2], target=3) == -1
assert Solution().search(nums=[1], target=0) == -1
assert Solution().search(nums=[1], target=1) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/367-ValidPerfectSquare.py
```python
class Solution:
def isPerfectSquare(self, num: int) -> bool:
"""
70 / 70 test cases passed.
Status: Accepted
Runtime: 39 ms
Memory Usage: 14.3 MB
1 <= num <= 2^31 - 1
:param num:
:return:
"""
if num == 1:
return True
low, high = 0, num // 2
while low <= high:
mid = (low + high) // 2
if mid * mid == num:
return True
if mid * mid > num:
high = mid - 1
else:
low = mid + 1
return False
def test():
assert Solution().isPerfectSquare(num=1)
assert not Solution().isPerfectSquare(num=2)
assert Solution().isPerfectSquare(num=16)
assert not Solution().isPerfectSquare(num=14)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/3-LongestSubstringWithoutRepeatingCharacters.py
```python
class Solution:
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
max_substr = ''
max_len = 0
for c in s:
if c not in max_substr:
max_substr += c
else:
max_len = max(len(max_substr), max_len)
max_substr = max_substr[max_substr.index(c) + 1:]
max_substr += c
max_len = max(len(max_substr), max_len)
return max_len
def lengthOfLongestSubstring2(self, s: str) -> int:
"""
        2021-08-28: solved it again,
        and found the approach is the same as 3 years ago (2018-07-06).
987 / 987 test cases passed.
Status: Accepted
Runtime: 57 ms
Memory Usage: 14.2 MB
:param s:
:return:
"""
subs = ''
longest_len = 0
for i in s:
if i in subs:
longest_len = max(len(subs), longest_len)
subs = subs[subs.index(i) + 1:]
subs += i
longest_len = max(len(subs), longest_len)
return longest_len
def test():
assert Solution().lengthOfLongestSubstring('abcdcfge') == 5
assert Solution().lengthOfLongestSubstring2('') == 0
assert Solution().lengthOfLongestSubstring2(s="abcabcbb") == 3
assert Solution().lengthOfLongestSubstring2(s="bbbbb") == 1
assert Solution().lengthOfLongestSubstring2(s="pwwkew") == 3
assert Solution().lengthOfLongestSubstring2('abcdcfge') == 5
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/412-FizzBuzz.py
```python
from typing import List
class Solution:
def fizzBuzz(self, n: int) -> List[str]:
"""
8 / 8 test cases passed.
Status: Accepted
Runtime: 44 ms
Memory Usage: 15.1 MB
:param n:
:return:
"""
ret = []
for i in range(1, n + 1):
if i % 15 == 0:
ret.append('FizzBuzz')
elif i % 3 == 0:
ret.append('Fizz')
elif i % 5 == 0:
ret.append('Buzz')
else:
ret.append(str(i))
return ret
def test():
assert Solution().fizzBuzz(n=3) == ["1", "2", "Fizz"]
assert Solution().fizzBuzz(n=5) == ["1", "2", "Fizz", "4", "Buzz"]
assert Solution().fizzBuzz(n=15) == ["1", "2", "Fizz", "4", "Buzz", "Fizz", "7", "8", "Fizz", "Buzz", "11", "Fizz",
"13", "14", "FizzBuzz"]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/414-ThirdMaximumNumber.py
```python
from math import inf
from typing import List
class Solution:
def thirdMax(self, nums: List[int]) -> int:
m1 = nums[0]
m2 = None
m3 = None
for n in nums[1:]:
if n == m1 or n == m2 or n == m3:
continue
if m1 <= n:
m3 = m2
m2 = m1
m1 = n
            elif m2 is None:
m2 = n
elif m2 <= n:
m3 = m2
m2 = n
            elif m3 is None:
m3 = n
elif m3 < n:
m3 = n
# print(f'm1: {m1}, m2: {m2}, m3:{m3}')
return m3 if m3 is not None else m1
def thirdMax1(self, nums: List[int]) -> int:
m1 = nums[0]
m2 = -inf
m3 = -inf
for n in nums[1:]:
if n == m1 or n == m2 or n == m3:
continue
if m1 <= n:
m3 = m2
m2 = m1
m1 = n
elif m2 <= n:
m3 = m2
m2 = n
elif m3 < n:
m3 = n
# print(f'm1: {m1}, m2: {m2}, m3:{m3}')
return m3 if m3 != -inf else m1
def test():
assert Solution().thirdMax([3, 2, 1]) == 1
assert Solution().thirdMax([3, 3, 3]) == 3
assert Solution().thirdMax([1, 2, 3]) == 1
assert Solution().thirdMax([1, 2]) == 2
assert Solution().thirdMax([1]) == 1
assert Solution().thirdMax([2, 2, 3, 1]) == 1
assert Solution().thirdMax([5, 8, 3, 2, 1, 9]) == 5
assert Solution().thirdMax([3, 3, 4, 3, 4, 3, 0, 3, 3]) == 0
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/415-AddStrings.py
```python
from tool import print_results
class Solution:
@print_results
def addStrings(self, num1: str, num2: str) -> str:
"""
317 / 317 test cases passed.
Status: Accepted
Runtime: 36 ms
Memory Usage: 14.3 MB
:param num1:
:param num2:
:return:
"""
        # The obvious solution:
        # return str(int(num1) + int(num2))
        # If the numbers are too long for that to be acceptable, use the digit-by-digit addition below.
m = 0
i, j = len(num1) - 1, len(num2) - 1
ret = ''
while i >= 0 or j >= 0:
if i >= 0 and j >= 0:
tmp = int(num1[i]) + int(num2[j])
i -= 1
j -= 1
elif j >= 0:
tmp = int(num2[j])
j -= 1
elif i >= 0:
tmp = int(num1[i])
i -= 1
m, n = divmod(tmp + m, 10)
ret = str(n) + ret
if m == 1:
return '1' + ret
else:
return ret
def test():
assert Solution().addStrings(num1="1", num2="9999") == '10000'
assert Solution().addStrings(num1="11", num2="123") == '134'
assert Solution().addStrings(num1="456", num2="77") == '533'
assert Solution().addStrings(num1="0", num2="0") == '0'
assert Solution().addStrings(num1="1", num2="0") == '1'
assert Solution().addStrings(num1="5555", num2="5555") == '11110'
assert Solution().addStrings(num1="6994", num2="36") == '7030'
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/441-ArrangingCoins.py
```python
class Solution:
def arrangeCoins(self, n: int) -> int:
"""
Runtime: 924 ms, faster than 35.91%
Memory Usage: 14.3 MB, less than 39.42%
1 <= n <= 2^31 - 1
:param n:
:return:
"""
i = 1
while n >= i:
n, i = n - i, i + 1
return i - 1
def test():
assert Solution().arrangeCoins(n=1) == 1
assert Solution().arrangeCoins(n=2) == 1
assert Solution().arrangeCoins(n=3) == 2
assert Solution().arrangeCoins(n=4) == 2
assert Solution().arrangeCoins(n=5) == 2
assert Solution().arrangeCoins(n=6) == 3
assert Solution().arrangeCoins(n=7) == 3
assert Solution().arrangeCoins(n=8) == 3
assert Solution().arrangeCoins(n=9) == 3
assert Solution().arrangeCoins(n=10) == 4
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/442-FindAllDuplicatesinanArray.py
```python
from typing import List
from tool import equal_list_value
class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
"""
28 / 28 test cases passed.
Status: Accepted
Runtime: 372 ms
Memory Usage: 21.6 MB
:param nums:
:return:
"""
ret = []
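        # Values are in [1, n], so the sign of nums[v - 1] can serve as a "seen" marker for v:
        # negate it on the first visit; finding it already negative means v is a duplicate.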
for i in range(len(nums)):
index = abs(nums[i]) - 1
if nums[index] < 0:
ret.append(abs(nums[i]))
nums[index] = -nums[index]
return ret
def test():
assert equal_list_value(Solution().findDuplicates(nums=[4, 3, 2, 7, 8, 2, 3, 1]), [2, 3])
assert equal_list_value(Solution().findDuplicates(nums=[10, 2, 5, 10, 9, 1, 1, 4, 3, 7]), [10, 1])
assert Solution().findDuplicates(nums=[1, 1, 2]) == [1]
assert Solution().findDuplicates(nums=[1]) == []
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/450-DeleteNodeinaBST.py
```python
from typing import Optional
from tree_node import TreeNode, build_tree_node
class Solution:
def deleteNode(self, root: Optional[TreeNode], key: int) -> Optional[TreeNode]:
"""
Runtime: 76 ms, faster than 63.07%
Memory Usage: 18.5 MB, less than 22.29%
The number of nodes in the tree is in the range [0, 10^4].
-10^5 <= Node.val <= 10^5
Each node has a unique value.
root is a valid binary search tree.
-10^5 <= key <= 10^5
:param root:
:param key:
:return:
"""
if root is None:
return root
# if not found key in BST, just return root
# if found, noted as keynode, and we need replace this node
# by the maximum node in its left side or minimum node in its right side.
def replace(node):
if node is None:
return None
if node.left is None:
return node.right
if node.right is None:
return node.left
# we choose left maximum node to be the root node
parent_node = node
tmp_node = parent_node.left
if tmp_node.right is None:
tmp_node.right = node.right
return tmp_node
while tmp_node.right is not None:
parent_node = tmp_node
tmp_node = tmp_node.right
parent_node.right = tmp_node.left
tmp_node.left = node.left
tmp_node.right = node.right
return tmp_node
if root.val == key:
return replace(root)
p_node = root
while p_node is not None:
if p_node.left and key == p_node.left.val:
p_node.left = replace(p_node.left)
return root
if p_node.right and key == p_node.right.val:
p_node.right = replace(p_node.right)
return root
if key < p_node.val:
p_node = p_node.left
else:
p_node = p_node.right
return root
def test():
null = None
tree_list = [22, 19, 24, 17, 21, 23, null, 16, 18, 20]
root = build_tree_node(tree_list)
ret_list = [21, 19, 24, 17, 20, 23, null, 16, 18]
assert Solution().deleteNode(root=root, key=22) == build_tree_node(ret_list)
# Delete root node
ret = Solution().deleteNode(root=build_tree_node([2, 1, 3]), key=2)
assert ret == build_tree_node([1, null, 3]) or ret == build_tree_node([3, 1])
assert Solution().deleteNode(root=build_tree_node([2, 1, 3]), key=1) == build_tree_node([2, null, 3])
assert Solution().deleteNode(root=build_tree_node([2, 1, 3]), key=3) == build_tree_node([2, 1])
assert Solution().deleteNode(root=build_tree_node([2, 1, 3]), key=4) == build_tree_node([2, 1, 3])
ret = Solution().deleteNode(root=build_tree_node([5, 3, 6, 2, 4, null, 7]), key=3)
assert ret == build_tree_node([5, 4, 6, 2, null, null, 7]) or ret == build_tree_node([5, 2, 6, null, 4, null, 7])
assert Solution().deleteNode(root=build_tree_node([5, 3, 6, 2, 4, null, 7]), key=0) == \
build_tree_node([5, 3, 6, 2, 4, null, 7])
assert Solution().deleteNode(root=build_tree_node([]), key=0) == build_tree_node([])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/45-JumpGameII.py
```python
from typing import List
class Solution:
def jump(self, nums: List[int]) -> int:
"""
106 / 106 test cases passed.
Status: Accepted
Runtime: 148 ms
Memory Usage: 15.1 MB
:param nums:
:return:
"""
n = len(nums)
dp = [0] * n
last = 0
for i in range(n):
j = max(i + 1, last)
while j < min(n, i + nums[i] + 1):
dp[j] = dp[i] + 1
j += 1
last = j
return dp[-1]
def test():
assert Solution().jump(nums=[2]) == 0
assert Solution().jump(nums=[1, 1, 1, 1, 1]) == 4
assert Solution().jump(nums=[2, 3, 1, 1, 4]) == 2
assert Solution().jump(nums=[2, 3, 0, 1, 4]) == 2
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/463-IslandPerimeter.py
```python
from typing import List
class Solution:
def islandPerimeter(self, grid: List[List[int]]) -> int:
"""
5833 / 5833 test cases passed.
Status: Accepted
Runtime: 842 ms
Memory Usage: 14.4 MB
:param grid:
:return:
"""
ret = 0
m = len(grid)
n = len(grid[0])
for i in range(m):
for j in range(n):
if grid[i][j] == 1:
if i == 0 or grid[i - 1][j] == 0:
ret += 1
if i == m - 1 or grid[i + 1][j] == 0:
ret += 1
if j == 0 or grid[i][j - 1] == 0:
ret += 1
if j == n - 1 or grid[i][j + 1] == 0:
ret += 1
return ret
def test():
assert Solution().islandPerimeter(grid=[[0, 1, 0, 0], [1, 1, 1, 0], [0, 1, 0, 0], [1, 1, 0, 0]]) == 16
assert Solution().islandPerimeter(grid=[[1]]) == 4
assert Solution().islandPerimeter(grid=[[1, 0]]) == 4
assert Solution().islandPerimeter(grid=[[1, 1], [1, 1]]) == 8
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/485-MaxConsecutiveOnes.py
```python
from typing import List
class Solution:
def findMaxConsecutiveOnes(self, nums: List[int]) -> int:
"""
42 / 42 test cases passed.
Status: Accepted
Runtime: 340 ms
Memory Usage: 14.1 MB
:param nums:
:return:
"""
if not nums:
return 0
ret = 0
cur = 0
for i in nums:
if i == 1:
cur += 1
else:
ret = max(ret, cur)
cur = 0
return max(ret, cur)
def test():
assert Solution().findMaxConsecutiveOnes([1, 1, 0, 1, 1, 1]) == 3
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/496-NextGreaterElementI.py
```python
from typing import List
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
"""
Runtime: 94 ms, faster than 19.85%
Memory Usage: 14.5 MB, less than 73.08%
1 <= nums1.length <= nums2.length <= 1000
0 <= nums1[i], nums2[i] <= 10**4
All integers in nums1 and nums2 are unique.
All the integers of nums1 also appear in nums2.
:param nums1:
:param nums2:
:return:
"""
ret = []
for num in nums1:
index = nums2.index(num)
for n in nums2[index + 1:]:
if n > num:
ret.append(n)
break
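            # for/else: the else branch runs only when the inner loop finishes without break,
            # i.e. no element to the right of num is greater than it.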
else:
ret.append(-1)
return ret
def test():
assert Solution().nextGreaterElement(nums1=[4], nums2=[4]) == [-1]
assert Solution().nextGreaterElement(nums1=[4, 1, 2], nums2=[1, 3, 4, 2]) == [-1, 3, -1]
assert Solution().nextGreaterElement(nums1=[2, 4], nums2=[1, 2, 3, 4]) == [3, -1]
assert Solution().nextGreaterElement(nums1=[2, 4, 3, 1], nums2=[4, 3, 2, 1]) == [-1, -1, -1, -1]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/49-GroupAnagrams.py
```python
import collections
from typing import List
from tool import print_results
class Solution:
@print_results
def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
"""
114 / 114 test cases passed.
Status: Accepted
Runtime: 140 ms
Memory Usage: 28 MB
:param strs:
:return:
"""
d = collections.defaultdict(list)
for s in strs:
key = frozenset(collections.Counter(s).items())
d[key].append(s)
return list(d.values())
def test():
ans = [["bat"], ["nat", "tan"], ["ate", "eat", "tea"]]
ret = Solution().groupAnagrams(strs=["eat", "tea", "tan", "ate", "nat", "bat"])
    # Only the number of groups is checked here; verifying group contents is left as a TODO.
assert len(ans) == len(ret)
assert Solution().groupAnagrams(strs=[""]) == [[""]]
assert Solution().groupAnagrams(strs=["a"]) == [["a"]]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/509-FibonacciNumber.py
```python
class Solution:
cache = {}
def fib(self, N: int) -> int:
if N < 2:
return N
if N in self.cache:
return self.cache[N]
else:
ret = self.fib(N - 1) + self.fib(N - 2)
self.cache[N] = ret
return ret
def test():
assert Solution().fib(0) == 0
assert Solution().fib(1) == 1
assert Solution().fib(2) == 1
assert Solution().fib(4) == 3
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/53-MaximumSubarray.py
```python
from typing import List
class Solution:
def maxSubArray2(self, nums: List[int]) -> int:
"""
Updated on 2021-11-25
Runtime: 720 ms, faster than 78.96%
Memory Usage: 28.8 MB, less than 33.13%
1 <= nums.length <= 10^5
-10^4 <= nums[i] <= 10^4
:param nums:
:return:
"""
last_max_sum = nums[0]
ret = last_max_sum
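        # Kadane's algorithm: last_max_sum is the best subarray sum ending at the current
        # element; a negative running sum is dropped because it can only hurt what follows.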
for num in nums[1:]:
if last_max_sum < 0:
last_max_sum = num
else:
last_max_sum += num
ret = max(ret, last_max_sum)
return ret
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
max_sum = nums[0]
tmp_sum = nums[0]
for i in range(1, len(nums)):
tmp_sum = max(nums[i], tmp_sum + nums[i])
max_sum = max(max_sum, tmp_sum)
return max_sum
def test():
assert Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert Solution().maxSubArray2(nums=[-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert Solution().maxSubArray2(nums=[5, 4, -1, 7, 8]) == 23
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/542-01Matrix.py
```python
from typing import List
class Solution:
def updateMatrix(self, mat: List[List[int]]) -> List[List[int]]:
"""
49 / 49 test cases passed.
Status: Accepted
Runtime: 564 ms
Memory Usage: 17.2 MB
m == mat.length
n == mat[i].length
1 <= m, n <= 10^4
1 <= m * n <= 10^4
mat[i][j] is either 0 or 1.
There is at least one 0 in mat.
:param mat:
:return:
"""
m = len(mat)
n = len(mat[0])
        # Do not write `ret = [[10001] * n] * m`: the outer `*` copies references to the same
        # inner list, so mutating one row would mutate them all.
ret = [[10001 for _ in range(n)] for _ in range(m)]
# get minimum steps from top and left
for i in range(m):
for j in range(n):
if mat[i][j] == 0:
ret[i][j] = 0
else:
if i > 0:
ret[i][j] = min(ret[i - 1][j] + 1, ret[i][j])
if j > 0:
ret[i][j] = min(ret[i][j - 1] + 1, ret[i][j])
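        # Two raster sweeps (top-left to bottom-right, then the reverse) form the classic
        # sequential distance transform and give the exact city-block distance to the nearest 0.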
# rescan to get minimum steps from bottom and right
for i in range(m - 1, -1, -1):
for j in range(n - 1, -1, -1):
if ret[i][j] == 0:
continue
else:
if i < m - 1:
ret[i][j] = min(ret[i + 1][j] + 1, ret[i][j])
if j < n - 1:
ret[i][j] = min(ret[i][j + 1] + 1, ret[i][j])
return ret
def test():
assert Solution().updateMatrix(
mat=[[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 1, 1, 1], [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 1, 1, 1, 0, 1], [1, 1, 1, 1, 1, 1, 1, 0, 1, 0],
[1, 1, 1, 1, 0, 1, 0, 0, 1, 1]]) == \
[[1, 0, 1, 1, 0, 0, 1, 0, 0, 1], [0, 1, 1, 0, 1, 0, 1, 0, 1, 1], [0, 0, 1, 0, 1, 0, 0, 1, 0, 0],
[1, 0, 1, 0, 1, 1, 1, 1, 1, 1], [0, 1, 0, 1, 1, 0, 0, 0, 0, 1], [0, 0, 1, 0, 1, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 1, 1], [1, 0, 0, 0, 1, 2, 1, 1, 0, 1], [2, 1, 1, 1, 1, 2, 1, 0, 1, 0],
[3, 2, 2, 1, 0, 1, 0, 0, 1, 1]]
assert Solution().updateMatrix(mat=[[0]]) == [[0]]
assert Solution().updateMatrix(mat=[[0], [1]]) == [[0], [1]]
assert Solution().updateMatrix(mat=[[0, 0, 0], [0, 1, 0], [0, 0, 0]]) == [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
assert Solution().updateMatrix(mat=[[0, 0, 0], [0, 1, 0], [1, 1, 1]]) == [[0, 0, 0], [0, 1, 0], [1, 2, 1]]
assert Solution().updateMatrix(
mat=[[1, 1, 0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 0, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 1, 1, 1], [0, 0, 1, 1, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 0, 0, 1], [1, 1, 1, 1, 1, 0, 0, 1, 1, 1], [0, 1, 0, 1, 1, 0, 1, 1, 1, 1],
[1, 1, 1, 0, 1, 0, 1, 1, 1, 1]]) == \
[[2, 1, 0, 0, 1, 0, 0, 1, 1, 0], [1, 0, 0, 1, 0, 1, 1, 2, 2, 1], [1, 1, 1, 0, 0, 1, 2, 2, 1, 0],
[0, 1, 2, 1, 0, 1, 2, 3, 2, 1], [0, 0, 1, 2, 1, 2, 1, 2, 1, 0], [1, 1, 2, 3, 2, 1, 0, 1, 1, 1],
[0, 1, 2, 3, 2, 1, 1, 0, 0, 1], [1, 2, 1, 2, 1, 0, 0, 1, 1, 2], [0, 1, 0, 1, 1, 0, 1, 2, 2, 3],
[1, 2, 1, 0, 1, 0, 1, 2, 3, 4]]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/55-JumpGame.py
```python
from typing import List
class Solution:
def canJump(self, nums: List[int]) -> bool:
"""
166 / 166 test cases passed.
Status: Accepted
Runtime: 5652 ms
Memory Usage: 15.3 MB
:param nums:
:return:
"""
n = len(nums)
if n == 1:
return True
jmp_lst = [False for _ in range(n)]
jmp_lst[n - 1] = True
for i in range(n - 2, -1, -1):
for j in range(min(n - i - 1, nums[i]), 0, -1):
if jmp_lst[i + j]:
jmp_lst[i] = True
break
return jmp_lst[0]
def canJump2(self, nums: List[int]) -> bool:
"""
166 / 166 test cases passed.
Status: Accepted
Runtime: 500 ms
Memory Usage: 15.2 MB
:param nums:
:return:
"""
max_step = 0
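        # Greedy scan: max_step is the furthest index reachable so far; reaching an index
        # beyond max_step means that position can never be visited, so the end is unreachable.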
for i, n in enumerate(nums):
if max_step < i:
return False
max_step = max(i + n, max_step)
return True
def test():
assert Solution().canJump(nums=[0])
assert not Solution().canJump(nums=[0, 1])
assert Solution().canJump(nums=[1, 0])
assert Solution().canJump(nums=[2, 0, 1])
assert Solution().canJump(nums=[2, 3, 1, 1, 4])
assert not Solution().canJump(nums=[3, 2, 1, 0, 4])
assert Solution().canJump2(nums=[0])
assert not Solution().canJump2(nums=[0, 1])
assert Solution().canJump2(nums=[1, 0])
assert Solution().canJump2(nums=[2, 0, 1])
assert Solution().canJump2(nums=[2, 3, 1, 1, 4])
assert not Solution().canJump2(nums=[3, 2, 1, 0, 4])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/56-MergeIntervals.py
```python
from typing import List
class Solution:
def merge(self, intervals: List[List[int]]) -> List[List[int]]:
"""
168 / 168 test cases passed.
Status: Accepted
Runtime: 76 ms
Memory Usage: 16.1 MB
:param intervals:
:return:
"""
intervals = sorted(intervals, key=lambda x: x[0])
ret = []
tmp = intervals[0]
for interval in intervals[1:]:
if interval[1] < tmp[1]:
continue
if interval[0] <= tmp[1]:
tmp[1] = interval[1]
else:
# interval[0] > tmp[1]
ret.append(tmp)
tmp = interval
ret.append(tmp)
return ret
def test():
assert Solution().merge(intervals=[[1, 1]]) == [[1, 1]]
assert Solution().merge(intervals=[[1, 3], [2, 6], [8, 10], [15, 18]]) == [[1, 6], [8, 10], [15, 18]]
assert Solution().merge(intervals=[[1, 4], [4, 5]]) == [[1, 5]]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/5963-ANumberAfteraDoubleReversal.py
```python
class Solution:
def isSameAfterReversals(self, num: int) -> bool:
"""
0 <= num <= 10^6
:param num:
:return:
"""
return not ((num % 10) == 0 and num > 0)
def test():
assert Solution().isSameAfterReversals(num=0)
assert Solution().isSameAfterReversals(num=526)
assert not Solution().isSameAfterReversals(num=5260)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/598-RangeAdditionII.py
```python
from typing import List
class Solution:
def maxCount(self, m: int, n: int, ops: List[List[int]]) -> int:
"""
69 / 69 test cases passed.
Status: Accepted
Runtime: 64 ms
Memory Usage: 16.1 MB
:param m:
:param n:
:param ops:
:return:
"""
m_min, n_min = m, n
for op in ops:
m_min = min(op[0], m_min)
n_min = min(op[1], n_min)
return m_min * n_min
def test():
assert Solution().maxCount(m=3, n=3, ops=[[2, 2], [3, 3]]) == 4
assert Solution().maxCount(m=3, n=3, ops=[[2, 3], [3, 2]]) == 4
assert Solution().maxCount(
m=3, n=3,
ops=[[2, 2], [3, 3], [3, 3], [3, 3], [2, 2], [3, 3], [3, 3], [3, 3], [2, 2], [3, 3], [3, 3], [3, 3]]
) == 4
assert Solution().maxCount(m=3, n=3, ops=[]) == 9
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/605-CanPlaceFlowers.py
```python
from typing import List
class Solution:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
"""
Runtime: 284 ms, faster than 12.11%
Memory Usage: 14.3 MB, less than 99.36%
1 <= flowerbed.length <= 2 * 10^4
flowerbed[i] is 0 or 1.
There are no two adjacent flowers in flowerbed.
0 <= n <= flowerbed.length
:param flowerbed:
:param n:
:return:
"""
if n == 0:
return True
ret = 0
i = 0
while i < len(flowerbed) and ret < n:
if flowerbed[i] == 0 and (i == 0 or flowerbed[i - 1] == 0) and (
i == len(flowerbed) - 1 or flowerbed[i + 1] == 0):
flowerbed[i] = 1
ret += 1
if ret >= n:
return True
i += 1
return False
def test():
assert Solution().canPlaceFlowers(flowerbed=[0], n=1)
assert Solution().canPlaceFlowers(flowerbed=[1, 0, 0, 0, 0, 0, 1], n=2)
assert Solution().canPlaceFlowers(flowerbed=[0, 0, 1, 0, 1], n=1)
assert Solution().canPlaceFlowers(flowerbed=[0, 0, 1, 0, 0], n=2)
assert Solution().canPlaceFlowers(flowerbed=[1, 0, 0, 0, 1], n=1)
assert not Solution().canPlaceFlowers(flowerbed=[1, 0, 0, 0, 1], n=2)
assert not Solution().canPlaceFlowers(flowerbed=[1, 0, 1, 0, 1, 0, 1], n=1)
assert not Solution().canPlaceFlowers(flowerbed=[1, 0, 0, 0, 0, 1], n=2)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/677-MapSumPairs.py
```python
import collections
class MapSum:
"""
    35 / 35 test cases passed.
    Status: Accepted
    Runtime: 28 ms, faster than 89.65% of Python3 online submissions for Map Sum Pairs.
    Memory Usage: 14.4 MB, less than 25.00% of Python3 online submissions for Map Sum Pairs.
"""
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = {}
def insert(self, key: str, val: int) -> None:
"""
Inserts the key-val pair into the map.
        If the key already exists, the original key-value pair
        is overridden by the new one.
:param key:
:param val:
:return:
"""
cur = self.root
for c in key:
if c not in cur:
cur[c] = {}
cur = cur[c]
# set the word value at the last character.
cur['val'] = val
def sum(self, prefix: str) -> int:
cur = self.root
for c in prefix:
if c not in cur:
return 0
cur = cur[c]
ret = 0
# add all children values
dq = collections.deque()
dq.append(cur)
while len(dq) > 0:
cur = dq.pop()
if 'val' in cur:
ret += cur['val']
[dq.append(x) for k, x in cur.items() if k != 'val']
return ret
def test():
mapSum = MapSum()
mapSum.insert("apple", 3)
assert mapSum.sum("ap") == 3 # return 3 (apple=3)
mapSum.insert("app", 2)
assert mapSum.sum("ap") == 5 # return 5 (apple + app = 3 + 2 = 5)
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/69-SqrtX.py
```python
class Solution:
def mySqrt2(self, x: int) -> int:
"""
Updated at 2021/12/02
1017 / 1017 test cases passed.
Status: Accepted
Runtime: 32 ms
Memory Usage: 14.3 MB
0 <= x <= 2^31 - 1
:param x:
:return:
"""
low, high = 0, x
while low <= high:
mid = (low + high) // 2
if mid * mid == x:
return mid
elif mid * mid > x:
high = mid - 1
else:
low = mid + 1
return low - 1
def mySqrt(self, x):
"""
:type x: int
:rtype: int
"""
if x == 1:
return 1
low = 0
high = x
while low < high - 1:
mid = low + (high - low) // 2
ret = mid * mid
if ret == x:
return mid
if ret > x:
high = mid
else:
low = mid
return low
def test():
assert Solution().mySqrt2(1) == 1
assert Solution().mySqrt2(2) == 1
assert Solution().mySqrt2(4) == 2
assert Solution().mySqrt2(8) == 2
assert Solution().mySqrt(4) == 2
assert Solution().mySqrt(8) == 2
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/700-SearchinaBinarySearchTree.py
```python
from typing import Optional
from tree_node import TreeNode, build_tree_node
class Solution:
def searchBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
"""
36 / 36 test cases passed.
Status: Accepted
Runtime: 89 ms
Memory Usage: 16.1 MB
:param root:
:param val:
:return:
"""
if root is None:
return None
if root.val == val:
return root
if root.val < val:
return self.searchBST(root.right, val)
else:
return self.searchBST(root.left, val)
def test():
assert Solution().searchBST(root=build_tree_node([4, 2, 7, 1, 3]), val=2) == build_tree_node([2, 1, 3])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/707-DesignLinkedList.py
```python
class Node:
def __init__(self, val: int):
self.val = val
self.next = None
class MyLinkedList:
def __init__(self):
"""
Initialize your data structure here.
"""
self.first = None
self.length = 0
def get(self, index: int) -> int:
"""
Get the value of the index-th node in the linked list. If the index is invalid, return -1.
"""
if index >= self.length:
return -1
else:
i = 0
tmp_node: Node = self.first
while i < index:
tmp_node = tmp_node.next
i += 1
return tmp_node.val
def addAtHead(self, val: int) -> None:
"""
Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
"""
tmp_node: Node = Node(val)
tmp_node.next = self.first
self.first = tmp_node
self.length += 1
def addAtTail(self, val: int) -> None:
"""
Append a node of value val to the last element of the linked list.
"""
# i = 1
# tmp_node = self.first
# while i < self.length:
# tmp_node = tmp_node.next
# tmp_node.next = Node(val)
# # self.first = tmp_node
# self.length += 1
self.addAtIndex(index=self.length, val=val)
def addAtIndex(self, index: int, val: int) -> None:
"""
Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
"""
if index > self.length:
return
if index == 0:
self.addAtHead(val)
return
i = 1
tmp_node = self.first
while i < index:
tmp_node = tmp_node.next
i += 1
insert_node = Node(val)
insert_node.next = tmp_node.next
tmp_node.next = insert_node
self.length += 1
def deleteAtIndex(self, index: int) -> None:
"""
Delete the index-th node in the linked list, if the index is valid.
"""
if index >= self.length:
return
        if index == 0:
            self.first = self.first.next
            self.length -= 1
            return
i = 1
tmp_node = self.first
while i < index:
tmp_node = tmp_node.next
i += 1
tmp_node.next = tmp_node.next.next
self.length -= 1
def debug_print(self):
val_list = []
tmp_node = self.first
while tmp_node:
val_list.append(tmp_node.val)
tmp_node = tmp_node.next
print(val_list)
return val_list
def test():
# Your MyLinkedList object will be instantiated and called as such:
obj = MyLinkedList()
assert obj.debug_print() == []
# param_1 = obj.get(0)
obj.addAtHead(1)
assert obj.debug_print() == [1]
obj.addAtTail(0)
assert obj.debug_print() == [1, 0]
obj.addAtIndex(1, 10)
assert obj.debug_print() == [1, 10, 0]
assert obj.get(2) == 0
assert obj.get(3) == -1
assert obj.get(0) == 1
assert obj.get(1) == 10
assert obj.get(2) == 0
obj.deleteAtIndex(1)
assert obj.debug_print() == [1, 0]
obj.deleteAtIndex(1)
assert obj.debug_print() == [1]
obj.deleteAtIndex(1)
assert obj.debug_print() == [1]
# ["MyLinkedList","addAtHead","deleteAtIndex"]
# [[],[1],[0]]
obj = MyLinkedList()
obj.addAtHead(1)
assert obj.debug_print() == [1]
obj.deleteAtIndex(0)
assert obj.debug_print() == []
# ["MyLinkedList","addAtIndex","addAtIndex","addAtIndex","get"]
# [[],[0,10],[0,20],[1,30],[0]]
obj = MyLinkedList()
obj.addAtIndex(0, 10)
assert obj.debug_print() == [10]
obj.addAtIndex(0, 20)
assert obj.debug_print() == [20, 10]
obj.addAtIndex(1, 30)
assert obj.debug_print() == [20, 30, 10]
assert obj.get(0) == 20
# ["MyLinkedList","addAtHead","deleteAtIndex","addAtHead","addAtHead",
# "addAtHead","addAtHead","addAtHead","addAtTail","get","deleteAtIndex","deleteAtIndex"]
# [[],[2],[1],[2],[7],[3],[2],[5],[5],[5],[6],[4]]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/744-FindSmallestLetterGreaterThanTarget.py
```python
from typing import List
class Solution:
def nextGreatestLetter(self, letters: List[str], target: str) -> str:
"""
165 / 165 test cases passed.
Status: Accepted
Runtime: 136 ms
Memory Usage: 14.9 MB
2 <= letters.length <= 10^4
letters[i] is a lowercase English letter.
letters is sorted in non-decreasing order.
letters contains at least two different characters.
target is a lowercase English letter.
:param letters:
:param target:
:return:
"""
if target >= letters[-1]:
return letters[0]
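        # After the wrap-around check above, letters[high] stays strictly greater than target
        # throughout the search, while low only advances past letters <= target.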
low, high = 0, len(letters) - 1
while low + 1 < high:
mid = (low + high) // 2
if letters[mid] <= target:
low = mid
else:
high = mid
# low + 1 == high
if letters[low] > target:
return letters[low]
return letters[high] if letters[high] != target else letters[high + 1]
def test():
assert Solution().nextGreatestLetter(letters=["e", "e", "e", "e", "e", "e", "n", "n", "n", "n"], target="e") == "n"
assert Solution().nextGreatestLetter(letters=["c", "f", "j"], target="a") == "c"
assert Solution().nextGreatestLetter(letters=["c", "f", "j"], target="c") == 'f'
assert Solution().nextGreatestLetter(letters=["c", "f", "j"], target="d") == 'f'
assert Solution().nextGreatestLetter(letters=["c", "f", "j"], target="f") == 'j'
assert Solution().nextGreatestLetter(letters=["c", "f", "j"], target="j") == 'c'
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/746-MinCostClimbingStairs.py
```python
from typing import List
class Solution:
def minCostClimbingStairs(self, cost: List[int]) -> int:
"""
283 / 283 test cases passed.
Status: Accepted
Runtime: 56 ms
Memory Usage: 14.4 MB
:param cost:
:return:
"""
if len(cost) == 1:
return 0
if len(cost) == 2:
return min(cost)
dp = [0] * len(cost)
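        # dp[i] is the minimum total cost paid when standing on step i (its own cost included);
        # the top can be reached from either of the last two steps, hence min(dp[-1], dp[-2]).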
dp[0] = cost[0]
dp[1] = cost[1]
for i in range(2, len(cost)):
dp[i] = cost[i] + min(dp[i - 1], dp[i - 2])
return min(dp[-1], dp[-2])
def test():
assert Solution().minCostClimbingStairs(cost=[10, 15, 20]) == 15
assert Solution().minCostClimbingStairs(cost=[1, 100, 1, 1, 1, 100, 1, 1, 100, 1]) == 6
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/779-K-thSymbolInGrammar.py
```python
import math
class Solution:
def kthGrammar(self, N: int, K: int) -> int:
        # Row N is row N-1 followed by its complement, so the K-th symbol of row N comes from
        # the ceil(K/2)-th symbol of row N-1: copied when K is odd, flipped when K is even.
        # The first symbol of every row is always 0.
if K == 1:
return 0
last = self.kthGrammar(N - 1, math.ceil(K / 2))
if K % 2 == 1:
return 0 if last == 0 else 1
else:
return 1 if last == 0 else 0
def test():
assert Solution().kthGrammar(N=1, K=1) == 0
assert Solution().kthGrammar(N=2, K=1) == 0
assert Solution().kthGrammar(N=2, K=2) == 1
assert Solution().kthGrammar(N=4, K=5) == 1
if __name__ == '__main__':
test()
# print(Solution().kthGrammar(N=30, K=2))
```
#### File: myleetcode/src/915-PartitionArrayintoDisjointIntervals.py
```python
from typing import List
class Solution:
def partitionDisjoint(self, nums: List[int]) -> int:
l_max = tmp_max = nums[0]
l_len = 1
going_compare = False
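        # l_max: maximum of the current left partition, tmp_max: maximum of everything scanned;
        # whenever nums[i] < l_max the left partition must grow to include i, and if a value
        # >= l_max has been seen in between (going_compare), its maximum becomes tmp_max.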
for i in range(1, len(nums)):
if l_max > nums[i]:
if going_compare:
# found a right value less than left
l_max = tmp_max
going_compare = False
l_len = i + 1
else:
going_compare = True
tmp_max = max(nums[i], tmp_max)
return l_len
def test():
assert Solution().partitionDisjoint([24, 11, 49, 80, 63, 8, 61, 22, 73, 85]) == 9
assert Solution().partitionDisjoint([5, 0, 3, 8, 6]) == 3
assert Solution().partitionDisjoint([1, 2]) == 1
assert Solution().partitionDisjoint([2, 2]) == 1
assert Solution().partitionDisjoint([5, 0, 3, 8, 4, 6, 9]) == 6
assert Solution().partitionDisjoint([1, 1, 1, 0, 6, 12]) == 4
assert Solution().partitionDisjoint([5, 4, 3, 2, 8]) == 4
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/922-SortArrayByParityII.py
```python
from typing import List
from tool import print_results
class Solution:
@print_results
def sortArrayByParityII(self, nums: List[int]) -> List[int]:
"""
61 / 61 test cases passed.
Status: Accepted
Runtime: 220 ms
Memory Usage: 16.2 MB
:param nums:
:return:
"""
j = 1
for i in range(0, len(nums), 2):
if nums[i] % 2 == 0:
continue
while j < len(nums) and nums[j] % 2 == 1:
j += 2
nums[i], nums[j] = nums[j], nums[i]
return nums
def test():
ans = [[4, 5, 2, 7], [2, 5, 4, 7], [2, 7, 4, 5]]
assert Solution().sortArrayByParityII(nums=[4, 2, 5, 7]) in ans
assert Solution().sortArrayByParityII(nums=[2, 3]) == [2, 3]
assert Solution().sortArrayByParityII(nums=[2, 3, 1, 1, 4, 0, 0, 4, 3, 3])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/929-UniqueEmailAddresses.py
```python
from typing import List
class Solution:
def numUniqueEmails(self, emails: List[str]) -> int:
"""
183 / 183 test cases passed.
Status: Accepted
Runtime: 48 ms
Memory Usage: 14.3 MB
:param emails:
:return:
"""
email_set = set()
for email in emails:
p_index = email.find('+')
at_index = email.index('@')
if p_index > -1:
email_set.add(email[:p_index].replace('.', '') + email[at_index:])
else:
email_set.add(email[:at_index].replace('.', '') + email[at_index:])
        # print(email_set)
return len(email_set)
def test():
assert Solution().numUniqueEmails(["<EMAIL>", "<EMAIL>"]) == 1
assert Solution().numUniqueEmails(
emails=["<EMAIL>",
"<EMAIL>+<EMAIL>",
"<EMAIL>"]) == 2
assert Solution().numUniqueEmails(emails=["<EMAIL>", "<EMAIL>", "<EMAIL>"]) == 3
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/954-ArrayofDoubledPairs.py
```python
from typing import List
class Solution:
def canReorderDoubled(self, arr: List[int]) -> bool:
"""
102 / 102 test cases passed.
Status: Accepted
Runtime: 7000 ms
Memory Usage: 16.9 MB
        My solution spends too much time on list operations.
The leetcode solution 1 is:
```python
count = collections.Counter(A)
for x in sorted(A, key = abs):
if count[x] == 0: continue
if count[2*x] == 0: return False
count[x] -= 1
count[2*x] -= 1
        return True
        ```
:param arr:
:return:
"""
arr = sorted(arr)
# we remove two elements in a iteration
for _ in range(len(arr) // 2):
i = arr.pop(0)
if i < 0:
m, n = divmod(i, 2)
if n == 1 or m not in arr:
return False
else:
arr.remove(m)
else:
if 2 * i not in arr:
return False
else:
arr.remove(2 * i)
return True
def test():
assert not Solution().canReorderDoubled(arr=[3, 1, 3, 6])
assert not Solution().canReorderDoubled(arr=[2, 1, 2, 6])
assert Solution().canReorderDoubled(arr=[4, -2, 2, -4])
assert not Solution().canReorderDoubled(arr=[1, 2, 4, 16, 8, 4])
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/977-SquaresOfASortedArray.py
```python
from typing import List
class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
return sorted([x * x for x in A])
def test():
assert Solution().sortedSquares([-4, -1, 0, 3, 10]) == [0, 1, 9, 16, 100]
if __name__ == '__main__':
test()
```
#### File: myleetcode/src/993-CousinsinBinaryTree.py
```python
from typing import Optional
from tree_node import TreeNode, build_tree_node
class Solution:
def isCousins(self, root: Optional[TreeNode], x: int, y: int) -> bool:
"""
Runtime: 28 ms, faster than 93.30%
Memory Usage: 14.3 MB, less than 71.37%
The number of nodes in the tree is in the range [2, 100].
1 <= Node.val <= 100
Each node has a unique value.
x != y
x and y are exist in the tree.
:param root:
:param x:
:param y:
:return:
"""
if root.val == x or root.val == y:
return False
parent_list = [root]
cur_list = []
while len(parent_list) > 0:
cur_val_list = []
for node in parent_list:
if node.left is not None and node.right is not None:
if node.left.val in [x, y] and node.right.val in [x, y]:
return False
if node.left is not None:
cur_list.append(node.left)
cur_val_list.append(node.left.val)
if node.right is not None:
cur_list.append(node.right)
cur_val_list.append(node.right.val)
if x in cur_val_list:
if y in cur_val_list:
return True
else:
return False
if y in cur_val_list:
return False
parent_list = cur_list.copy()
cur_list = []
return False
def test():
null = None
assert not Solution().isCousins(root=build_tree_node([1, 2, 3, null, 4]), x=2, y=3)
assert not Solution().isCousins(root=build_tree_node([1, 2, 3, 4]), x=4, y=3)
assert Solution().isCousins(root=build_tree_node([1, 2, 3, null, 4, null, 5]), x=5, y=4)
if __name__ == '__main__':
test()
```
#### File: src/todo/152-MaximumProductSubarray.py
```python
from typing import List
class Solution:
def maxProduct(self, nums: List[int]) -> int:
pass
def test():
assert Solution().maxProduct(nums=[2, 3, -2, 4]) == 6
assert Solution().maxProduct(nums=[-2, 0, -1]) == 0
if __name__ == '__main__':
test()
```
#### File: src/todo/1986-MinimumNumberofWorkSessionstoFinishtheTasks.py
```python
from typing import List
class Solution:
def minSessions(self, tasks: List[int], sessionTime: int) -> int:
cnt = 0
tasks = sorted(tasks, reverse=True)
while len(tasks) > 0:
if tasks[0] == sessionTime:
tasks.remove(tasks[0])
cnt += 1
continue
if len(tasks) == 1:
cnt += 1
break
left = sessionTime - tasks[0]
tasks.remove(tasks[0])
if left in tasks:
tasks.remove(left)
cnt += 1
else:
to_remove = []
for task in tasks:
if task <= left:
left -= task
to_remove.append(task)
for r in to_remove:
tasks.remove(r)
cnt += 1
return cnt
def test():
assert Solution().minSessions(tasks=[2, 3, 3, 4, 4, 4, 5, 6, 7, 10], sessionTime=12) == 4
assert Solution().minSessions(tasks=[1], sessionTime=2) == 1
assert Solution().minSessions(tasks=[1, 2, 3], sessionTime=3) == 2
assert Solution().minSessions(tasks=[3, 1, 3, 1, 1], sessionTime=8) == 2
assert Solution().minSessions(tasks=[1, 2, 3, 4, 5], sessionTime=15) == 1
if __name__ == '__main__':
test()
```
#### File: src/todo/29-DivideTwoIntegers.py
```python
class Solution:
def divide(self, dividend: int, divisor: int) -> int:
"""
-2**31 <= dividend, divisor <= 2**31 - 1
divisor != 0
:param dividend:
:param divisor:
:return:
"""
if dividend == 0:
return 0
negative = False
# Turn all num to negative
if divisor < 0 and dividend > 0:
negative = True
dividend = -dividend
elif divisor > 0 and dividend < 0:
negative = True
divisor = -divisor
elif divisor > 0 and dividend > 0:
divisor = -divisor
dividend = -dividend
ret = 1
# all num are negative now
if dividend > divisor:
return 0
while divisor > dividend:
divisor += divisor
ret += 1
if negative and divisor == dividend:
return -ret
return -ret + 1 if negative else ret
def test():
assert Solution().divide(dividend=-1, divisor=2) == 0
assert Solution().divide(dividend=1, divisor=2) == 0
assert Solution().divide(dividend=-1, divisor=1) == -1
assert Solution().divide(dividend=10, divisor=3) == 3
assert Solution().divide(dividend=5, divisor=2) == 2
assert Solution().divide(dividend=7, divisor=-3) == -2
assert Solution().divide(dividend=0, divisor=-3) == 0
assert Solution().divide(dividend=1, divisor=1) == 1
assert Solution().divide(dividend=1, divisor=-1) == -1
assert Solution().divide(dividend=-1, divisor=-1) == 1
if __name__ == '__main__':
test()
```
#### File: src/todo/522-LongestUncommonSubsequenceII.py
```python
from typing import List
class Solution:
def findLUSlength(self, strs: List[str]) -> int:
pass
def test():
assert Solution().findLUSlength(strs=["aba", "cdc", "eae"]) == 3
assert Solution().findLUSlength(strs=["aaa", "aaa", "aa"]) == -1
if __name__ == '__main__':
test()
``` |
{
"source": "jifanz/ACED",
"score": 3
} |
#### File: ACED/src/algorithm.py
```python
import logging
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
from src.argmax_oracle import argmax_oracle_single
from src.dataset import *
from src.utils import *
from src.record import *
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def maxZ(z0, l_inv, shift, theta, eta, num_grid=40):
'''
Computes
max_h f(lambda; h; zeta)
Input:
z0: vector of dimension N_dim, tilde{h_k} in the paper.
l_inv: vector of dimension N_dim, 1 / lambda in the paper.
shift: a non-negative number, 2^{-k+1} in the paper.
theta: vector of dimension N_dim, hat{eta_{k - 1}} in the paper.
eta: vector of dimension N_dim, zeta in the paper.
        num_grid: number of grid points used in the search over the scalar r.
    Output:
        A tuple (None, z, record): z is the prediction vector of the maximizing hypothesis,
        and record holds diagnostics (diffs, fracs, rs) from the search over r.
'''
seed = np.random.randint(1, 1e8)
def glinmax(r, return_pred=False, debug=False):
v = -np.sqrt(l_inv) * eta + r * theta
z = argmax_oracle_single(v, seed=seed, return_test_accuracy=False)
if z @ theta > z0 @ theta:
# Found better hypothesis than z0, so we replace it with the new hypothesis.
# This is necessary since we are using an approximate oracle where we relax the 0/1 loss.
raise ZException('Found Better', z)
val = z @ v
diff = val - r * (shift + theta @ z0) + np.sum(z0 * np.sqrt(l_inv) * eta)
frac = np.sum((z0 - z) * np.sqrt(l_inv) * eta) / (shift + theta @ (z0 - z))
if return_pred:
return diff, frac, z
return diff, frac
r = 100
factor = 10
rs = [r]
diffs = []
fracs = []
zs = []
diff, frac, z = glinmax(r, return_pred=True)
diffs.append(diff)
fracs.append(frac)
zs.append(z)
it = 0
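    # Grid search over the scalar r: first halve r until the constraint value `diff` becomes
    # non-negative, then alternately grow/shrink r with a decaying factor while recording the
    # objective `frac`; the best candidate over all grid points is returned.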
while diff < 0 and it < num_grid:
r /= 2
diff, frac, z = glinmax(r, return_pred=True)
rs.append(r)
diffs.append(diff)
fracs.append(frac)
zs.append(z)
it += 1
for _ in range(num_grid - it):
diff, frac, z = glinmax(r, return_pred=True)
rs.append(r)
diffs.append(diff)
fracs.append(frac)
zs.append(z)
if diff > 0:
r *= factor
else:
r /= factor ** 2
factor /= 1.4
idx = np.argmax(np.array(fracs))
record = {'diffs': diffs, 'fracs': fracs, 'rs': rs}
return None, zs[idx], record
def mp_batch(maxz_params):
'''
Wrapper around maxZ that also records information for logging.
Input:
maxz_params: a dictionary from function argument name to values to be passed to maxZ.
'''
    _, z, _ = maxZ(**maxz_params)
    # Only gamma_val is logged here; the detailed search record from maxZ is discarded.
    record = {}
z0 = maxz_params['z0']
l_inv = maxz_params['l_inv']
shift = maxz_params['shift']
theta_k = maxz_params['theta']
eta = maxz_params['eta']
gamma_val = np.sum((z0 - z) * np.sqrt(l_inv) * eta) / (shift + (z0 - z) @ theta_k)
record['gamma_val'] = gamma_val
return gamma_val, z, eta, record
def eval_grad(z0, l_inv, shift, theta_k, eta_batch, pool):
'''
Computes
E [max_h f(lambda; h; zeta)]
where expectation is taken by averaging over eta_batch.shape[0] samples.
Input:
z0: vector of dimension N_dim, tilde{h_k} in the paper.
l_inv: vector of dimension N_dim, 1 / lambda in the paper.
shift: a non-negative number, 2^{-k+1} in the paper.
theta_k: vector of dimension N_dim, hat{eta_{k - 1}} in the paper.
eta_batch: array of (M, N_dim), each row corresponds to a sample of zeta in the paper.
pool: multiprocessing pool.
Output:
z: argmax vector, denoting the prediction of the hypothesis.
val: value of max_h f(lambda; h; zeta)
'''
finished = False
while not finished:
try:
results = pool.map(mp_batch, [{'z0': z0, 'l_inv': l_inv, 'shift': shift, 'theta': theta_k,
'eta': eta_batch[i, :], 'num_grid': 40}
for i in range(eta_batch.shape[0])])
finished = True
except ZException as e:
# Found better hypothesis than z0, so we replace it with the new hypothesis.
# This is necessary since we are using an approximate oracle where we relax the 0/1 loss.
print('resetted')
z0 = e.z
# Retrieve from results of parallel computation.
zs = []
records = []
gamma_vals = []
for (gamma_val, z, eta, record) in results:
gamma_vals.append(gamma_val)
zs.append(z)
records.append(record)
gamma_vals = np.array(gamma_vals)
zs = np.array(zs)
# Compute gradient wrt lambda.
grads = -1 / 2 / np.sqrt(N_dim) * (z0 - zs) * np.sqrt(l_inv) ** 3 * eta_batch / (
shift + (z0 - zs) @ theta_k.reshape((-1, 1)))
avg_val = np.mean(gamma_vals)
avg_val_var = np.mean(gamma_vals ** 2) - avg_val ** 2
avg_g = np.mean(grads, axis=0)
avg_g_var = np.mean(grads ** 2, axis=0) - avg_g ** 2
return avg_val, avg_val_var / eta_batch.shape[0], avg_g, avg_g_var / eta_batch.shape[0], z0, records, zs
def gamma_combi(z0, theta_k, k, B, shared_data, iters=N_it_gamma_combi, max_batch_size=800, min_batch_size=50,
max_lr=.1, min_lr=1e-5, eps_rel=0.2, eps_abs=1., visualize=False, recorder=None, trial=0, l=None):
'''
A function to minimize gamma* as a function of lambda.
Input:
z0: vector of dimension N_dim, tilde{h_k} in the paper.
theta_k: vector of dimension N_dim, hat{eta_{k - 1}} in the paper.
k: index number of iteration, k in the paper.
B: scaling factor for 2^{-k}, usually 2 * N_dim to match paper.
shared_data: dataset object to be passed into multiprocessing pool.
iters: total number of iterations (100 should be plenty for all purposes).
Batchsize (number of etas per iteration) is set adaptively in the algorithm. But will never exceed limits.
min_batch_size: can be as small as 1, default 10. Let algorithm grow it.
max_batch_size: should be a bit larger than gamma*. So maybe take this to be twice the total measurement budget
you plan to take.
        Step size (learning rate) is set adaptively in the algorithm, but will never exceed the limits below.
        min_lr: in my experiments just on thresholds, .001 was small enough and almost never used.
        max_lr: in my experiments just on thresholds, 1.0 worked but could cause some lambdas to get extremely small,
            which is not great. 0.1 seems good.
Stopping criteria. If max_batch_size is not large enough, or min_eta is not small enough, these stopping
conditions may not be met.
eps_rel: Uses confidence intervals to Stop when (Gamma(lambda) - Gamma(opt)) / Gamma(lambda) < eps_rel. That is,
Gamma(lam) < Gamma(opt)/(1 - eps_rel). I suggest a conservative value like eps_rel=0.2.
eps_abs: Uses confidence intervals to Stop when Gamma(lambda) - Gamma(opt) < eps_abs. This should be no smaller
than 1, but could be quite large, like 10.
'''
shift = 2. ** (-k) * B
if l is None:
l = np.ones(N_dim) / N_dim
kappa = np.sqrt(2 * np.log(10))
num_resets = 0
batch_size = min_batch_size
lr_candidates = 1. / (10 ** (np.arange(int(-np.log10(max_lr)), int(-np.log10(min_lr)) + 1)))
gamma_expectations = []
pool = mp.Pool(N_max_processes, initializer=init_worker, initargs=shared_data)
writer = SummaryWriter('runs/active_{}'.format(trial))
l_inv = 1 / np.clip(l, a_min=1e-8, a_max=None)
while True:
eta_batch = np.random.randn(batch_size, N_dim)
avg_val, avg_val_var, avg_g, avg_g_var, z0, records, zs = eval_grad(z0, l_inv, shift, theta_k, eta_batch, pool)
subopt_gap = np.dot(avg_g, l) - np.min(avg_g)
if subopt_gap < kappa * np.sqrt(max(avg_g_var)) and 2 * batch_size <= max_batch_size:
batch_size *= 2
print("New batch size:", batch_size)
else:
break
grads = []
grad_norms = []
for t in range(iters):
old_z0 = z0
if recorder is not None:
recorder.set_level('maxZ_{}'.format(t))
subopt_gap = avg_g @ l - np.min(avg_g)
grads.append(avg_g)
grad = avg_g - np.min(avg_g)
grad_norms.append(np.linalg.norm(avg_g - np.mean(avg_g)))
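        # Exponentiated-gradient step on the simplex: lbd = l * exp(-lr * grad), renormalised.
        # Learning rates are tried from max_lr downwards, stopping at the first one that no
        # longer gives a confident improvement of the estimated Gamma value.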
flag = True
for j, lr in enumerate(lr_candidates):
eta_batch = np.random.randn(batch_size, N_dim)
lbd = l * np.exp(-lr * grad)
lbd = np.clip(lbd, a_min=1e-8, a_max=1)
lbd /= np.sum(lbd)
l_inv = 1 / np.clip(lbd, a_min=1e-8, a_max=None)
if np.sum(np.isnan(l_inv)):
continue
avg_val, avg_val_var, avg_g, avg_g_var, z0, records, zs = eval_grad(z0, l_inv, shift, theta_k, eta_batch,
pool)
if flag or avg_val + np.sqrt(avg_val_var) < best_tuple[0] - np.sqrt(best_tuple[1]):
flag = False
best_tuple = (avg_val, avg_val_var, avg_g, avg_g_var, lbd, records, zs)
else:
break
avg_val, avg_val_var, avg_g, avg_g_var, l, records, zs = best_tuple
if subopt_gap < kappa * np.sqrt(np.max(avg_g_var)):
if 2 * batch_size <= max_batch_size:
batch_size *= 2
# Logging
gamma_expectations.append(avg_val)
if (t + 1) >= iters // 10 and (t + 1) % (iters // 10) == 0:
logging.info('gamma_combi: iter {}'.format(t + 1))
writer.add_scalar('Gamma_Combi_{}/gradient_norms'.format(k), np.linalg.norm(avg_g - np.mean(avg_g)), t)
writer.add_scalar('Gamma_Combi_{}/gamma_expectations'.format(k), gamma_expectations[-1], t)
if recorder is not None:
recorder.record_vars(['zs', 'record', 'etas'], [zs, records, eta_batch])
recorder.pop()
# Found better hypothesis, don't terminate.
if np.sum(z0 != old_z0) != 0:
continue
# Relative suboptimality gap stopping condition.
if (subopt_gap + kappa * np.sqrt(np.max(avg_g_var))) / (avg_val - kappa * np.sqrt(avg_val_var)) < eps_rel:
logging.info('Gamma_Combi finished after {} iterations'.format(t + 1))
break
# Absolute suboptimality gap stopping condition.
if subopt_gap + kappa * np.sqrt(np.max(avg_g_var)) < eps_abs:
logging.info('Gamma_Combi finished after {} iterations'.format(t + 1))
break
pool.close()
pool.join()
for i in range(len(l)):
writer.add_scalar('Gamma_Combi_{}/allocation'.format(k), l[i], i)
if recorder is not None:
recorder.record_vars(['ls', 'gamma_expectations', 'grads', 'grad_norms', 'num_resets'],
[l, gamma_expectations, grads, grad_norms, num_resets])
writer.close()
return l
def get_z0(theta_k, shared_data, num=100):
'''
More robust computation of argmax h <w, h>.
'''
pool = mp.Pool(N_max_processes, initializer=init_worker, initargs=shared_data)
result_lst = pool.map(argmax_oracle_single, [theta_k] + [
theta_k * np.random.choice([-1, 1], size=theta_k.shape[0], replace=True, p=[.01, .99]) for _ in range(num - 1)])
pool.close()
pool.join()
z0_lst = [result[0] for result in result_lst]
return result_lst[np.argmax(np.array(z0_lst) @ theta_k)]
def combi_alg(theta_star, z_star, schedule, it_run=0):
'''
Runs ACED.
Input:
theta_star: a vector of dimension N_dim with elements in {-1, 1}, 2 * h_star - 1.
z_star: a vector of dimension N_dim with elements in {0, 1}, h_star in paper.
schedule: number of new queries to take for each iteration.
it_run: index the number of runs of the algorithm (for averaging over multiple runs).
Output:
z0: best arm
lk: allocation
thetak: empirically computed theta estimate
np.sum(pulls): total number of queries
'''
shared_data = get_shared_data()
init_worker(*shared_data)
theta_k, theta_sum = 2 * np.random.rand(N_dim) - 1, 0
B = 2 * N_dim
pulls = np.zeros(N_dim)
labels = np.zeros(N_dim)
lk = np.ones(N_dim, dtype=float) / N_dim
writer = SummaryWriter('runs/active_{}'.format(it_run))
global_record = Recorder(data_name, model_name, f'combi_alg_global', idx=it_run)
z0, _ = get_z0(theta_k, shared_data)
prob_sum = np.zeros(N_dim, dtype=float)
pk = lk
for t in range(1, N_dim + 1):
if t in schedule:
k = schedule.index(t)
recorder = Recorder(data_name, model_name, f'combi_alg_run_{it_run}', idx=k)
logging.info('combi_alg: entering gamma_combi in round {}'.format(k))
recorder.set_level('gamma_combi')
lk = gamma_combi(z0, theta_k, k, B, shared_data, iters=N_it_gamma_combi, recorder=recorder,
trial=it_run, l=lk, min_batch_size=125, max_batch_size=2000)
logging.info('combi_alg: got gamma')
recorder.pop()
recorder.save()
del recorder
# Water filling.
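            # Added explanation: this water-filling step spreads the coming rounds'
            # sampling probability so that the cumulative distribution prob_sum catches
            # up with the target lk * (schedule[k + 1] - 1) by the start of the next
            # phase. The per-round deficits p_diff are filled greedily from the largest
            # down, and any probability mass left over is spread uniformly.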
p_left = 1.0
if k != len(schedule) - 1:
p_diff = (lk * (schedule[k + 1] - 1) - prob_sum) / (schedule[k + 1] - t)
diff_sorted = -np.sort(-p_diff)
pk = np.zeros(N_dim)
for ind in range(N_dim - 1):
diff = diff_sorted[ind] - diff_sorted[ind + 1]
if p_left > diff * (ind + 1):
pk[p_diff >= diff_sorted[ind]] += diff
p_left -= diff * (ind + 1)
else:
pk[p_diff >= diff_sorted[ind]] += p_left / (ind + 1)
break
if np.sum(pk) < 1:
pk = pk + (1. - np.sum(pk)) / N_dim
if not np.allclose(np.sum(pk), 1):
print(p_diff)
print(diff_sorted)
print(lk)
print(pk)
assert np.allclose(np.sum(pk), 1), "pk not summing to 1 (sum = {}), {}, {}".format(np.sum(pk), lk, pk)
pk += 1e-8
pk /= np.sum(pk)
prob_sum += pk
# Query based on pk.
idx = np.random.choice(np.arange(N_dim)[pulls == 0], 1, p=pk[pulls == 0] / np.sum(pk[pulls == 0]))
labels[idx] = theta_star[idx]
pulls[idx] = 1
theta_k = labels
# Logging.
if t in schedule:
for ind in range(N_dim):
writer.add_scalar('Gamma_Combi_{}/water_filling'.format(k), pk[ind], ind)
logger.info(
'combi_alg: total pulls up to this round {}, total seen this round {}'.format(sum(pulls), t))
logger.info('combi_alg: positive labels seen {}/{}'.format(sum(labels == 1), sum((theta_star + 1) / 2)))
# Update the best estimated hypothesis so far.
z0, test_accuracy_z0 = get_z0(theta_k, shared_data)
pred, test_accuracy_retrain = argmax_oracle_single(labels)
accuracy_retrain = np.sum(pred == z_star) / float(N_dim)
accuracy_z0 = np.sum(z0 == z_star) / float(N_dim)
writer.add_scalar('Accuracy/results/retrain', accuracy_retrain, t)
writer.add_scalar('Accuracy/results/z0', accuracy_z0, t)
writer.add_scalar('Accuracy/results/retrain_test', test_accuracy_retrain, t)
writer.add_scalar('Accuracy/results/z0_test', test_accuracy_z0, t)
print("Accuracy after round %d: retrain %f z0: %f" % (k, accuracy_retrain, accuracy_z0))
global_record.append_vars(
['lk', 'pk', 'pulls', 'theta_k', 'z0', 'pred', 'accuracy_retrain', 'accuracy_z0', 'z_star',
'theta_star', 'labels', 'test_accuracy_retrain', 'test_accuracy_z0'],
[lk, pk, np.array(pulls), theta_k, z0, pred, accuracy_retrain, accuracy_z0, z_star, theta_star,
labels, test_accuracy_retrain, test_accuracy_z0])
global_record.save()
elif (t + 1) in schedule:
z0, _ = get_z0(theta_k, shared_data)
global_record.record_var("schedule", schedule)
global_record.save()
writer.close()
return z0, lk, theta_k, np.sum(pulls)
if __name__ == "__main__":
np.random.seed(111)
mp.set_start_method('spawn')
shared_data = get_shared_data()
init_worker(*shared_data)
dataset = get_dataset()
for trial in range(1):
print("************ Starting Active Run #%d *************" % trial)
z_star = dataset["Y"].numpy()
result = combi_alg(z_star * 2 - 1, z_star, list(np.arange(250, N_dim, step=250)) + [N_dim], it_run=trial)
```
#### File: ACED/src/utils.py
```python
class ZException(Exception):
'''
Exception when z0 is suboptimal.
'''
def __init__(self, text, z, *args):
super(ZException, self).__init__(text, z, *args)
self.text = text
self.z = z
``` |
{
"source": "Jifan-Zhang/Ideas",
"score": 3
} |
#### File: Ideas/color-gradient/Discriminator.py
```python
import tensorflow as tf
from tensorflow.keras import Model
from Model_builder import Model_builder
from Generator import Generate
class Discriminator(Model_builder):
def __init__(self, input_shape):
super(Discriminator, self).__init__(input_shape, (1,))
def set_painter(self, Gen):
self.Gen = Gen
def build(self):
"""
Create model
"""
self.model = Model(inputs=self.blocks[0],outputs=self.blocks[-1])
return self.model
def __loss__(self, real_out, fake_out):
return fake_out - real_out
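    # Added note: minimizing fake_out - real_out pushes the discriminator to score
    # real patches higher than generated ones (a Wasserstein-style objective with no
    # sigmoid or log terms).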
def __get_pred__(self, path="color-gradient.jpg"):
# fake inputs
x = tf.random.uniform(minval=0, maxval=1, shape=(1,)+self.Gen.input_shape, dtype=tf.float32)
if(self.Gen.monitored):
fake_inputs = self.Gen.model(x)[-1]
else:
fake_inputs = self.Gen.model(x)
fake_out = self.model(fake_inputs)
# real inputs
        real_inputs = next(Generate(path, self.Gen.output_shape))
real_out = self.model(real_inputs)
return (real_out, fake_out)
def get_gradient(self):
with tf.GradientTape() as tape:
real_out, fake_out = self.__get_pred__()
loss = self.__loss__(real_out, fake_out)
self.__current_loss__ = loss.numpy()[0]
return tape.gradient(loss, self.model.trainable_weights)
```
#### File: Ideas/color-gradient/Model_builder.py
```python
import tensorflow as tf
from tensorflow.keras import Model
import tensorflow.keras.backend as K
import numpy as np
class Model_builder:
"""
A model layers wrapper, building layer blocks.
The instance variable remembers the layer structure.
input_shape: The image shape CNN trains on
"""
def __init__(self, input_shape = (1024,1024,3), output_shape = (512,512,3)):
self.input_shape = input_shape
self.output_shape = output_shape
self.blocks = []
def __bottle_neck_check__(f):
def inner(self, *args, **kwargs):
f(self, *args, **kwargs)
if(self.__dict__.get("Discriminator")): # Only painter checks bottle neck
x = self.blocks[-1]
if(x.get_shape()[1]<self.output_shape[0] or x.get_shape()[2]<self.output_shape[1]):
raise RuntimeError(f"The model has formed a bottle neck structure {(x.get_shape()[1],x.get_shape()[2])} < {(self.output_shape[0], self.output_shape[1])}, which should be recovered with up sampling, which is not implemented in the version.")
return inner
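    # Added note: __bottle_neck_check__ is applied as a decorator to the layer-adding
    # methods below; after a block is appended it verifies (only for the painter, i.e.
    # the instance that stores a Discriminator) that the spatial size has not dropped
    # below the requested output shape.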
def input(self):
out = tf.keras.Input(shape=self.input_shape)
self.blocks.append(out)
""""""
if(self.__dict__.get("Discriminator") is None): # Discriminator scales input (Only painter stores discriminator in its instance)
out = out/255.
self.blocks.append(out)
return out
@__bottle_neck_check__
def conv_block(self, n_filter=5, filter_size=(3,3), padding = "valid", strides=1):
x = self.blocks[-1]
out = tf.keras.layers.Conv2D(n_filter, filter_size, strides=strides, padding = padding, activation="selu")(x)
self.blocks.append(out)
return out
@__bottle_neck_check__
def pooling_block(self, strides=(2,2)):
x = self.blocks[-1]
out = tf.keras.layers.MaxPool2D()(x)
self.blocks.append(out)
return out
@__bottle_neck_check__
def conv_pool_block(self, n_filter=5, filter_size=(3,3), strides=1):
x = self.blocks[-1]
x = tf.keras.layers.Conv2D(n_filter, filter_size, strides=strides, padding = "same",activation="selu")(x)
out = tf.keras.layers.MaxPool2D()(x)
self.blocks.append(out)
return out
def fully_connected(self,n):
x = self.blocks[-1]
if(len(x.get_shape())!=2):
x = tf.keras.layers.Flatten()(x)
if(n==2):
activation = "softmax"
elif(n==1):
activation = "linear"
else:
activation = "selu"
out = tf.keras.layers.Dense(n, activation=activation)(x)
self.blocks.append(out)
return out
def top_block(self):
x = self.blocks[-1]
width = x.get_shape()[1]
height = x.get_shape()[2]
f_width = width + 1 - self.output_shape[0]
f_height = height + 1 - self.output_shape[1]
out = tf.keras.layers.Conv2D(3, (f_width,f_height) ,padding="valid", activation = "selu")(x)
self.blocks.append(out)
return out
``` |
{
"source": "jifegg/yafblog",
"score": 2
} |
#### File: yafblog/lib/myrenderer.py
```python
import mistune
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
class MyRenderer(mistune.Renderer):
def block_code(self, text, lang):
linenos = inlinestyles = False
if not lang:
text = text.strip()
return u'<pre><code>%s</code></pre>\n' % mistune.escape(text)
try:
lexer = get_lexer_by_name(lang, stripall=True)
formatter = HtmlFormatter(
noclasses=inlinestyles, linenos=linenos, cssclass='codehilite'
)
code = highlight(text, lexer, formatter)
if linenos:
return '<div class="highlight-wrapper">%s</div>\n' % code
return '<div class="doc doc-code">%s</div>%s' % (lang.upper(), code)
except:
return '<pre class="%s"><code>%s</code></pre>\n' % (
lang, mistune.escape(text)
)
def link(self, link, title, text):
link = mistune.escape_link(link)
if not title:
return '<a href="%s" target="_blank">%s</a>' % (link, text)
        title = mistune.escape(title, quote=True)
return '<a href="%s" title="%s" target="_blank">%s</a>' % (link, title, text)
def header(self, text, level, raw=None):
rv = '<h%d id="toc-%d">%s</h%d>\n' % (
level, self.toc_count, text, level
)
self.toc_tree.append((self.toc_count, text, level, raw))
self.toc_count += 1
return rv
def reset_toc(self):
self.toc_tree = []
self.toc_count = 0
def render_toc(self, level=3):
"""Render TOC to HTML.
:param level: render toc to the given level
"""
return ''.join(self._iter_toc(level))
def _iter_toc(self, level):
first_level = None
last_level = None
yield '<ul id="table-of-content">\n'
for toc in self.toc_tree:
index, text, l, raw = toc
if l > level or l < 2:
# ignore this level
continue
if first_level is None:
# based on first level
first_level = l
last_level = l
yield '<li><a href="#toc-%d">%s</a>' % (index, text)
elif last_level == l:
yield '</li>\n<li><a href="#toc-%d">%s</a>' % (index, text)
elif last_level == l - 1:
last_level = l
yield '<ul>\n<li><a href="#toc-%d">%s</a>' % (index, text)
elif last_level > l:
# close indention
yield '</li>'
while last_level > l:
yield '</ul>\n</li>\n'
last_level -= 1
yield '<li><a href="#toc-%d">%s</a>' % (index, text)
# close tags
if first_level and last_level:
yield '</li>\n'
while last_level > first_level:
yield '</ul>\n</li>\n'
last_level -= 1
yield '</ul>\n'
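    # Added note: _iter_toc walks the collected (index, text, level, raw) tuples and
    # emits nested <ul>/<li> markup, opening a new <ul> whenever the heading level goes
    # one step deeper and closing lists when it comes back up; headings deeper than
    # `level` or at level 1 are skipped.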
``` |
{
"source": "jifengting1/fastpliFork",
"score": 3
} |
#### File: fastpli/analysis/affine_transformation.py
```python
import numpy as np
import scipy.interpolate
import numba
def _replace_mat_row(B, r, d):
return np.linalg.det(np.delete(np.vstack([r, B]), (d + 1), axis=0))
@numba.njit(cache=True)
def _nearest_neighbors(image, M):
""" written for simpli images[x,y,rho]
"""
image = np.atleast_3d(image)
image_nn = np.empty_like(image)
M = np.ascontiguousarray(np.linalg.inv(M))
x_max, y_max = image.shape[0] - 1, image.shape[1] - 1
for i in range(image.shape[0]):
for j in range(image.shape[1]):
x, y, _ = M @ np.array([i, j, 1.0])
ii = max(0, min(x_max, int(np.rint(x))))
jj = max(0, min(y_max, int(np.rint(y))))
image_nn[i, j, :] = image[ii, jj, :]
return image_nn
def _interpolate_griddata(image, M, mode):
""" written for simpli images[x,y,rho]
"""
image = np.atleast_3d(image)
image_nn = np.empty_like(image)
grid_i, grid_j = np.mgrid[0:image.shape[0], 0:image.shape[1]]
# points -> coordinates in transformed image
points = np.array(
[grid_i.flatten(),
grid_j.flatten(),
np.ones(grid_j.size)])
points = (M @ points)[0:2, :]
for k in range(image.shape[2]):
image_nn[:, :, k] = scipy.interpolate.griddata(points.T,
image[:, :, k].flatten(),
(grid_i, grid_j),
method=mode)
return image_nn
def calc_matrix(p_in, p_out):
"""
Calculate the affine transformation matrix.
Parameters
----------
p_in, p_out : (3,2)-array_like
list of 3 x 2d points which will be transformed from p_in to p_out
Returns
-------
res : (3x3)-array
affine transformation matrix
"""
p_in = np.array(p_in)
p_out = np.array(p_out)
if not np.all(np.equal(np.array(p_in.shape), np.array(p_out.shape))):
raise TypeError("in and out not the same shape")
if not np.all(np.equal(np.array(p_in.shape), np.array([3, 2]))):
print(p_in.shape)
raise TypeError("shape error: input required [3x2], [3x2]")
l = p_in.shape[0]
B = np.vstack([np.transpose(p_in), np.ones(l)])
D = 1.0 / np.linalg.det(B)
M = np.array([[(-1)**i * D * _replace_mat_row(B, R, i)
for i in range(l)]
for R in np.transpose(p_out)])
return np.vstack([M, [0, 0, 1]])
def exec_matrix(M, x, y):
"""
Execute the affine transformation.
Parameters
----------
M : (3,3)-array
affine transformation matrix
x, y : float
2d coordinates to transform
Returns
-------
res : float, float
transformed coordinates
"""
x, y, _ = M @ np.array([x, y, 1.0])
return x, y
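# Illustrative sketch (not part of the original module): calc_matrix solves for the
# unique affine map sending three source points to three target points, and
# exec_matrix applies it. The points below are made up for demonstration only.
def _affine_example():
    src = [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]]
    dst = [[1.0, 2.0], [3.0, 2.0], [1.0, 5.0]]  # scale x by 2, y by 3, shift by (1, 2)
    M = calc_matrix(src, dst)
    # every source point should land (up to floating point) on its target point
    for (xs, ys), (xd, yd) in zip(src, dst):
        assert np.allclose(exec_matrix(M, xs, ys), (xd, yd))
    return M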
def image(image, M, mode='nearest'):
"""
Execute the affine transformation on simpli images[x,y,rho].
Parameters
----------
image : 2d-array
image to transform
M : float
affine transformation matrix
mode : str
"nearest", "linear", "cubic" interpolation mode
Returns
-------
res : 2d-array
transformed image
"""
if mode == 'nearest':
# this is faster then scipy.interpolate.griddata('nearest')
new_image = _nearest_neighbors(image, M)
elif mode == 'linear' or mode == 'cubic':
new_image = _interpolate_griddata(image, M, mode)
else:
raise ValueError(f"mode \"{mode}\" does not exist")
return np.squeeze(new_image)
```
#### File: fastpli/objects/fiber_bundle.py
```python
import numpy as np
import copy
from . import fiber
def Rescale(fiber_bundle, scale, mod='all'):
"""
Rescales fiber_bundle
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
scale : float
scale factor
mod : str, optional
'all', 'points' or 'radii' will be scaled
Returns
-------
res : [(,4)-array, ...]
scaled fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i] = fiber.Rescale(fiber_bundle[i], scale, mod)
return fiber_bundle
def Rotate(fiber_bundle, rot, offset=None):
"""
Rotates fiber_bundle around offset
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
rot : (3,3)-array_like
        rotation matrix
offset : 3d-array-array_like, optional
offset for rotation center
Returns
-------
res : [(,4)-array, ...]
rotated fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
rot = np.array(rot, copy=False)
if offset is not None:
offset = np.array(offset, copy=False)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i] = fiber.Rotate(fiber_bundle[i], rot, offset)
return fiber_bundle
def Translate(fiber_bundle, offset):
"""
Translates fiber_bundle
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
offset : 3d-array-array_like
offset to translate
Returns
-------
res : [(,4)-array, ...]
translated fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
offset = np.array(offset, copy=False)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i] = fiber.Translate(fiber_bundle[i], offset)
return fiber_bundle
def ApplyFun(fiber_bundle, fun):
"""
Applies function to fibers
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
fun : function
Returns
-------
res : [(,4)-array, ...]
translated fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i] = fun(fiber_bundle[i])
return fiber_bundle
def ApplyFunToPosition(fiber_bundle, fun):
"""
Applies function to fibers positions
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
fun : function
Returns
-------
res : [(,4)-array, ...]
translated fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i][:, :-1] = fun(fiber_bundle[i][:, :-1])
return fiber_bundle
def ApplyFunToRadii(fiber_bundle, fun):
"""
Applies function to fibers radii
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
fun : function
Returns
-------
res : [(,4)-array, ...]
translated fiber_bundle
"""
fiber_bundle = copy.deepcopy(fiber_bundle)
for i, _ in enumerate(fiber_bundle):
fiber_bundle[i][:, -1] = fun(fiber_bundle[i][:, -1])
return fiber_bundle
def Cut(fiber_bundle, voi):
"""
    Cut fiber bundle to the given voi. The cutting process can create multiple fibers.
    It checks every cone_aabb to see whether it overlaps with the voi.
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
voi : [xmin, ymin, zmin],[xmax,ymax,zmax]
Volume of interest of which fibers to include. E.g. same as in
Simulation
Returns
-------
res : [(,4)-array, ...]
        cut fiber_bundle
"""
new_fiber_bundle = []
for f in fiber_bundle:
new_fiber_bundle.extend(fiber.Cut(f, voi))
return new_fiber_bundle
def CutSphere(fiber_bundle, radius, center=[0, 0, 0]):
"""
    Cut fiber bundle to the given sphere. The cutting process can create multiple fibers.
    It checks every cone_aabb to see whether it overlaps with the sphere.
Parameters
----------
fiber_bundle : [(,4)-array, ...]
list of fibers
radius : float
radius of cutting sphere
center : 3d-array
center of cutting sphere
Returns
-------
res : [(,4)-array, ...]
        cut fiber_bundle
"""
new_fiber_bundle = []
for f in fiber_bundle:
new_fiber_bundle.extend(fiber.CutSphere(f, radius, center))
return new_fiber_bundle
```
#### File: fastpli/objects/fiber.py
```python
import numpy as np
import numba
def Rescale(fiber, scale, mod='all'):
"""
Rescales fiber
Parameters
----------
fiber : (,4)-array
fiber
scale : float
scale factor
mod : str, optional
'all', 'points' or 'radii' will be scaled
Returns
-------
res : (,4)-array
scaled fiber
"""
fiber = np.array(fiber, copy=True)
if mod == 'all':
fiber *= scale
elif mod == 'points':
fiber[:, :3] *= scale
elif mod == 'radii':
fiber[:, -1] *= scale
else:
raise ValueError('mod = [all, points, radii]')
return fiber
def Rotate(fiber, rot, offset=None):
"""
Rotates fiber around offset
Parameters
----------
fiber : (,4)-array
fiber
rot : (3,3)-array_like
        rotation matrix
offset : 3d-array-array_like, optional
offset for rotation center
Returns
-------
res : (,4)-array
rotated fiber
"""
rot = np.array(rot, copy=False)
fiber = np.array(fiber, copy=True)
if offset is None:
fiber[:, :3] = np.dot(rot, fiber[:, :3].T).T
else:
offset = np.array(offset, copy=False)
fiber[:, :3] = np.dot(rot, (fiber[:, :3] - offset).T).T + offset
return fiber
def Translate(fiber, offset):
"""
Translates fiber
Parameters
----------
fiber : (,4)-array
fiber
offset : 3d-array-array_like
offset to translate
Returns
-------
res : (,4)-array
translated fiber
"""
fiber = np.array(fiber, copy=True)
offset = np.array(offset, copy=False)
fiber[:, :3] += offset
return fiber
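# Illustrative sketch (not part of the original module): chaining the transformations
# above on a toy two-point fiber. The 90-degree z-rotation matrix is written out by
# hand here; fastpli.tools.rotation offers equivalent helpers.
def _fiber_transform_example():
    f = np.array([[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0]])
    rot_z_90 = np.array([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
    f = Rescale(f, 2.0, mod='points')  # points doubled, radii untouched
    f = Rotate(f, rot_z_90)  # second point moves from (2, 0, 0) to (0, 2, 0)
    f = Translate(f, [0.0, 0.0, 5.0])  # shift along z
    return f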
@numba.njit(cache=True)
def _cone_aabb_in_aabb(c0, c1, vmin, vmax):
c_min = np.array([
min(c0[0] - c0[-1], c1[0] - c1[-1]),
min(c0[1] - c0[-1], c1[1] - c1[-1]),
min(c0[2] - c0[-1], c1[2] - c1[-1])
])
c_max = np.array([
max(c0[0] + c0[-1], c1[0] + c1[-1]),
max(c0[1] + c0[-1], c1[1] + c1[-1]),
max(c0[2] + c0[-1], c1[2] + c1[-1])
])
for i in range(3):
if c_min[i] > vmax[i] or c_max[i] < vmin[i]:
return False
return True
@numba.njit(cache=True)
def _cone_aabb_in_sphere(c0, c1, r, center):
c_min = np.array([
min(c0[0] - c0[-1], c1[0] - c1[-1]),
min(c0[1] - c0[-1], c1[1] - c1[-1]),
min(c0[2] - c0[-1], c1[2] - c1[-1])
])
c_max = np.array([
max(c0[0] + c0[-1], c1[0] + c1[-1]),
max(c0[1] + c0[-1], c1[1] + c1[-1]),
max(c0[2] + c0[-1], c1[2] + c1[-1])
])
dmin = 0
for i in range(3):
if center[i] < c_min[i]:
dmin += (center[i] - c_min[i])**2
elif center[i] > c_max[i]:
dmin += (center[i] - c_max[i])**2
return dmin <= r**2
def Cut(fiber, voi):
"""
Cut fiber into voi. The cutting process can create multiple fibers.
    It checks every cone_aabb to see whether it overlaps with the voi.
Parameters
----------
fiber : (,4)-array
fiber
voi : [xmin, ymin, zmin],[xmax,ymax,zmax]
Volume of interest of which fibers to include. E.g. same as in
Simulation
Returns
-------
res : [(,4)-array]
cut fiber(s)
"""
fibers = []
fiber = np.array(fiber, copy=False)
if fiber.ndim != 2:
        raise TypeError("Wrong fiber shape")
start = 0
voi = np.array(voi)
for i in range(fiber.shape[0] - 1):
if not _cone_aabb_in_aabb(fiber[i, :], fiber[i + 1, :], voi[0], voi[1]):
if start != i:
fibers.append(fiber[start:i + 1])
start = i + 1
if start != i + 1:
fibers.append(fiber[start:])
return fibers
def CutSphere(fiber, radius, center=[0, 0, 0]):
"""
Cut fiber into sphere. The cutting process can create multiple fibers.
    It checks every cone_aabb to see whether it overlaps with the sphere.
Parameters
----------
fiber : (,4)-array
fiber
radius : float
radius of cutting sphere
center : 3d-array
center of cutting sphere
Returns
-------
    res : [(,4)-array, ...]
        cut fiber(s)
"""
center = np.array(center, copy=False)
fibers = []
fiber = np.array(fiber, copy=False)
if fiber.ndim != 2:
        raise TypeError("Wrong fiber shape")
start = 0
for i in range(fiber.shape[0] - 1):
if not _cone_aabb_in_sphere(fiber[i, :], fiber[i + 1, :], radius,
center):
if start != i:
fibers.append(fiber[start:i + 1])
start = i + 1
if start != i + 1:
fibers.append(fiber[start:])
return fibers
```
#### File: fastpli/tools/helper.py
```python
import glob
import os
from .. import __version__
def pip_freeze():
""" turns pip freeze into a string """
try:
from pip._internal.operations import freeze
except ImportError:
from pip.operations import freeze
return "\n".join(freeze.freeze())
def version_file_name(file_name):
""" Versions file name with .v{i}. Returns new file name with latest i """
file_path = os.path.dirname(file_name)
file_name = os.path.basename(file_name)
files = glob.glob(os.path.join(file_path, file_name + '*'))
def in_list(i, files):
for f in files:
if file_name + f".v{i}" in f:
return True
return False
i = 0
while in_list(i, files):
i += 1
return os.path.join(file_path, file_name + f".v{i}")
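# Illustrative behaviour (assuming 'result.h5' and 'result.h5.v0' already exist in
# 'out/'): version_file_name('out/result.h5') returns 'out/result.h5.v1'.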
def version_path(path, name):
""" Versions folder name with .v{i}. Returns new path with latest i """
folders = [
p for p in os.listdir(path) if os.path.isdir(os.path.join(path, p))
]
def in_list(i, name):
for f in folders:
if name + f".v{i}" in f:
return True
return False
i = 0
while in_list(i, name):
i += 1
return os.path.join(path, name + f".v{i}")
```
#### File: model/sandbox/sandbox_test.py
```python
import unittest
import numpy as np
import fastpli.model.sandbox as sb
import fastpli.objects as obj
class MainTest(unittest.TestCase):
def setUp(self):
pass
def test_triangular_grid(self):
seeds = sb.seeds.triangular_grid(0, 0, 1, endpoint=False)
self.assertTrue(seeds.size == 0)
seeds = sb.seeds.triangular_grid(2, 0, 1, endpoint=False)
self.assertTrue(seeds.size == 0)
seeds = sb.seeds.triangular_grid(2, 0, 1, endpoint=True)
self.assertTrue(seeds.shape[0] == 3)
self.assertTrue(np.all(seeds[:, 1] == 0))
self.assertTrue(np.all(seeds[:, 0] == np.array([0, 1, 2])))
seeds = sb.seeds.triangular_grid(2, 2, 1, endpoint=True)
self.assertTrue(seeds.shape[0] == 8)
def test_crop_rectangle(self):
seeds = sb.seeds.triangular_grid(2, 2, 1)
new_seeds = sb.seeds.crop_rectangle(2, 2, seeds)
self.assertTrue(np.array_equal(seeds, new_seeds))
seeds = sb.seeds.triangular_grid(0, 0, 1)
new_seeds = sb.seeds.crop_rectangle(2, 2, seeds, 1)
self.assertTrue(new_seeds.size == 0)
new_seeds = sb.seeds.crop_rectangle([-1, -1], [1, 1], seeds, 0)
self.assertTrue(np.array_equal(new_seeds, [[0, 0]]))
seeds = sb.seeds.triangular_grid(2, 2, 1)
new_seeds = sb.seeds.crop_rectangle(2,
2,
seeds,
radii=[1] * seeds.shape[0])
self.assertTrue(new_seeds.size < seeds.size)
def test_crop_circle(self):
seeds = sb.seeds.triangular_grid(2, 2, 1)
new_seeds = sb.seeds.crop_circle(100, seeds)
self.assertTrue(np.array_equal(seeds, new_seeds))
new_seeds = sb.seeds.crop_circle(1, seeds)
self.assertTrue(
np.array_equal(new_seeds,
[[0, 0], [1, 0], [0.5, np.sqrt(3) / 2]]))
new_seeds = sb.seeds.crop_circle(np.sqrt(2), seeds, [1, 1])
self.assertTrue(np.array_equal(seeds, new_seeds))
new_seeds = sb.seeds.crop_circle(1, seeds, center=[0, 0], radii=1)
self.assertTrue(np.array_equal(new_seeds, [[0, 0]]))
new_seeds = sb.seeds.crop_circle(1,
seeds,
center=[0, 0],
radii=[1] * seeds.shape[0])
self.assertTrue(np.array_equal(new_seeds, [[0, 0]]))
def test_build_cylinder(self):
seeds = sb.seeds.triangular_circle(10, 1)
for m in ['p', 'r', 'c']:
sb.build.cylinder(p=(0, 0, 0),
q=(10, 10, 10),
r_in=5,
r_out=8,
seeds=seeds,
radii=1,
alpha=np.deg2rad(20),
beta=np.deg2rad(160),
mode=m)
self.assertTrue(True)
def test_build_cuboid(self):
p = np.array([0, 80, 50])
q = np.array([40, 180, 100])
d = np.max(np.abs(p - q)) * np.sqrt(3)
seeds = sb.seeds.triangular_grid(a=d, b=d, spacing=5, center=True)
sb.build.cuboid(p=p,
q=q,
phi=np.deg2rad(45),
theta=np.deg2rad(90),
seeds=seeds,
radii=1)
self.assertTrue(True)
seeds = sb.seeds.triangular_grid(300, 300, 2, center=True)
fb = sb.build.cuboid(p=[-5] * 3,
q=[5] * 3,
phi=0,
theta=np.deg2rad(0),
seeds=seeds,
radii=1)
cut_fb = obj.fiber_bundle.Cut(fb, [[-5] * 3, [5] * 3])
for f0, f1 in zip(fb, cut_fb):
self.assertTrue(np.array_equal(f0, f1))
def test_build_bundle(self):
traj = np.array([[0, 0, 0], [0, 0, 100]])
seeds = np.array([[0, 0], [1, 1], [-1, 1]])
fiber_bundle = sb.build.bundle(traj, seeds, 1)
for i in range(len(fiber_bundle)):
self.assertTrue(
np.array_equal(fiber_bundle[i][0, :3],
[seeds[i][0], seeds[i][1], 0]))
self.assertTrue(
np.array_equal(fiber_bundle[i][1, :3],
[seeds[i][0], seeds[i][1], 100]))
if __name__ == '__main__':
unittest.main()
```
#### File: tests/objects/fiber_manipulation_test.py
```python
import unittest
import numpy as np
import fastpli.objects
import fastpli.tools
class MainTest(unittest.TestCase):
# TODO: implement object.fiber.*manipulations*
def setUp(self):
self.fiber = np.array([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
self.fiber_bundle = [self.fiber.copy()]
self.fiber_bundles = [[self.fiber.copy()]]
def test_resize(self):
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10)
self.assertTrue(np.array_equal(fiber, self.fiber * 10))
fb = fastpli.objects.fiber_bundle.Rescale(self.fiber_bundle, 10)
for f in fb:
self.assertTrue(np.array_equal(f, self.fiber * 10))
fbs = fastpli.objects.fiber_bundles.Rescale(self.fiber_bundles, 10)
for fb in fbs:
for f in fb:
self.assertTrue(np.array_equal(f, self.fiber * 10))
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10, mod='points')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2] * 10))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
fiber = fastpli.objects.fiber.Rescale(self.fiber, 10, mod='radii')
self.assertTrue(np.array_equal(fiber[:, :-2], self.fiber[:, :-2]))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1] * 10))
def test_rotation(self):
fiber = fastpli.objects.fiber.Rotate(self.fiber,
fastpli.tools.rotation.x(0))
self.assertTrue(np.array_equal(self.fiber, fiber))
fiber = fastpli.objects.fiber.Rotate(
self.fiber, fastpli.tools.rotation.x(np.deg2rad(90)))
self.assertTrue(
np.allclose(fiber, np.array([[0, 0, 0, 1], [1, -1, 1, 2]])))
fiber = fastpli.objects.fiber.Rotate(
self.fiber, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for f in self.fiber_bundle:
fiber = fastpli.objects.fiber.Rotate(
f, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
for fb in self.fiber_bundles:
for f in fb:
fiber = fastpli.objects.fiber.Rotate(
f, fastpli.tools.rotation.x(np.deg2rad(90)), [1, 1, 1])
self.assertTrue(
np.allclose(fiber, np.array([[0, 2, 0, 1], [1, 1, 1, 2]])))
def test_translate(self):
fiber = fastpli.objects.fiber.Translate(self.fiber, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(fiber[:, -1], self.fiber[:, -1]))
for f in self.fiber_bundle:
fiber = fastpli.objects.fiber.Translate(f, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
for fb in self.fiber_bundles:
for f in fb:
fiber = fastpli.objects.fiber.Translate(f, [1, 1, 1])
self.assertTrue(
np.array_equal(fiber[:, :3],
self.fiber[:, :3] + np.array([1, 1, 1])))
self.assertTrue(np.array_equal(f[:, -1], self.fiber[:, -1]))
def test_cut(self):
fiber = np.array([[0, 0, 0, 1], [1, 1, 1, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-10] * 3, [10] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2], [100, 100, 100, 2]],
dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 1)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2], [100, 100, 100, 2],
[10, 10, 10, 2], [0, 0, 0, 1]],
dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[-5] * 3, [5] * 3])
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber_bundle = [fiber]
cut_fb = fastpli.objects.fiber_bundle.Cut(fiber_bundle,
[[-5] * 3, [5] * 3])
fibers = cut_fb
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber_bundles = [[fiber]]
cut_fbs = fastpli.objects.fiber_bundles.Cut(fiber_bundles,
[[-5] * 3, [5] * 3])
fibers = cut_fbs[0]
self.assertTrue(len(cut_fbs) == 1)
self.assertTrue(len(fibers) == 2)
self.assertTrue(fibers[0].shape[0] == 2)
self.assertTrue(fibers[1].shape[0] == 2)
self.assertTrue(not np.array_equal(fibers[0], fiber))
self.assertTrue(not np.array_equal(fibers[1], fiber))
fiber = np.array([[0, 0, 0, 1], [10, 10, 10, 2]], dtype=float)
fibers = fastpli.objects.fiber.Cut(fiber, [[5] * 3, [6] * 3])
self.assertTrue(np.array_equal(fibers[0], fiber))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jifengyeying/ykt",
"score": 3
} |
#### File: ykt/lib/login_qiandao.py
```python
import requests
from . import pwd
def cookie_str_to_dict(cookie_str):
cookie_dict= {}
list_ = cookie_str.split(';') if cookie_str.find('; ') == -1 else cookie_str.split('; ')
for i in list_:
cookie_dict.update({i.split('=')[0]:i.split('=')[1]})
return cookie_dict
def cookie_dict_to_str(cookie_dict):
cookie_str = ''
for i in cookie_dict.keys():
cookie_str += i+'='+cookie_dict.get(i)+'; '
return cookie_str[:-2]
def login(user, password):
login_dict = {'account': '', 'password': '', 'ipForget':'true'}
login_dict['account'] = user
login_dict['password'] = <PASSWORD>(password)
headers = {'Host': 'ykt.zenking.cc', 'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Referer': '', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 'Cookie': ''}
login_headers = requests.post(url='http://ykt.zenking.cc/user/ajax/login', data=login_dict, headers=headers).headers
if not login_headers.get('Set-Cookie'):
return None
result = 'SESSION='+requests.post('http://ykt.zenking.cc/dialog/ajax/loginReg').headers['Set-Cookie'].split(';')[0].split('=')[1]+';'
for i in login_headers.get('Set-Cookie').split(' Expires='):
for j in i.split('Path=/, '):
if not j.endswith(' '):
if not j.endswith('/'):
result += j
if result.endswith(';'):
result = result[:-1]
result_dict = cookie_str_to_dict(result)
result_dict['web_user_logindatatime'] = result_dict['web_user_logindatatimeinxedu_customer_chanjing']
result = cookie_dict_to_str(result_dict)
return result
def qiandao():
pass
def cookie_change_test():
cookie = 'WEB_USER_LOGIN_PREFIXinxedu_customer_chanjing=26b450e86dcf4a54baf5fd1f562373a9; web_user_logindatatimeinxedu_customer_chanjing=2020-04-17%2019%3A22%3A27; web_user_logindatatime=2020-04-17%2019%3A22%3A27; courseIdinxedu_customer_chanjing=193; SESSION=MTZhZTIxOTAtM2Y5Ni00MzNlLWE4M2MtZTZmZjJiODBlZjIx; ONLINE_NUMBERinxedu_customer_chanjing=WEB_LOGINER29b023416f3f4963a533474ce6fc948c; s=1587259307570'
print('Yes') if cookie == cookie_dict_to_str(cookie_str_to_dict(cookie)) else print('No')
```
#### File: ykt/lib/spider.py
```python
import requests
from bs4 import BeautifulSoup as bs
import re
import mypy.data
import mypy.download
import datetime
class NoSettingException(Exception):
def __init__(self):
super().__init__()
def __str__(self):
        return 'You have not configured or selected anything yet!'
# This is the dedicated code path for senior grade 3 (gaosan); senior grades 1 and 2 use the class below
def gaosan(choice, class_='高三理科', data_path_name='data', aria2path=r'.\aria2c.exe'):
print(choice)
if not mypy.data.read_data('cookie', data_path_name=data_path_name):
raise NoSettingException
time = datetime.datetime.now().date().strftime('%m%d')
li_dict = {'高三理科数学':'ma_l_3'}
wen_dict = {'高三文科数学':'ma_w_3'}
dict_ = {'地理':'go_3', '语文':'cn_3', '物理':'ph_3', '英语':'en_3', '政治':'zz_3', '历史':'hi_3', '化学':'ch_3', '生物':'bo_3'}
dict_.update(li_dict if class_.find('文') == -1 else wen_dict)
for i in dict_.keys():
if choice == '所有':
url = 'http://vd.mincoo.com:8088/jyyz/' + time + '/' + dict_.get(i) + '.mp4'
mypy.download.aria2(dict_.get(i) + '.mp4', url, aria2path=aria2path, referer='http://ykt.zenking.cc/')
else:
if i.find(choice) != -1:
url = 'http://vd.mincoo.com:8088/jyyz/' + time + '/' + dict_.get(i) + '.mp4'
mypy.download.aria2(dict_.get(i) + '.mp4', url, aria2path=aria2path, referer='http://ykt.zenking.cc/')
class ykt:
def __init__(self, data_path_name='data'):
        # Some request info captured from the browser
headers = {'Host': 'ykt.zenking.cc', 'Connection': 'keep-alive', 'Cache-Control': 'max-age=0', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Referer': '', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 'Cookie': ''}
video_headers = {'Host': 'ykt.zenking.cc', 'Connection': 'keep-alive', 'Content-Length': '28', 'Accept': 'text/plain, */*; q=0.01', 'X-Requested-With': 'XMLHttpRequest', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'Origin': 'http://ykt.zenking.cc', 'Referer': '', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6', 'Cookie': ''}
        # Read the stored info (cookie)
headers['Cookie'] = mypy.data.read_data('cookie', data_path_name=data_path_name)
video_headers['Cookie'] = mypy.data.read_data('cookie', data_path_name=data_path_name)
if not headers['Cookie']:
raise NoSettingException
self.headers = headers
        self.video_headers = video_headers
self.aria2path = r'.\lib\aria2c.exe'
self.time = datetime.datetime.now().date()
        # Get the date and strip the leading zeros
self.time_without_zero = str(int(self.time.strftime('%m'))) + '月' + str(int(self.time.strftime('%d')))
def main(self, choice_num=None, class_=None):
        # Download; choice_num must be a list (multiple entries supported)!
for num in choice_num:
html_url = 'http://ykt.zenking.cc/uc/play/' + num + '/0'
print(html_url)
            # Set the Referer
self.headers['Referer'] = 'http://ykt.zenking.cc/front/couinfo/' + num
self.video_headers['Referer'] = 'http://ykt.zenking.cc/uc/play/' + num + '/0'
filename, urls = self.get_video_real_name_and_url(html_url, class_)
self.download(urls, filename)
def download(self, download_url, filename):
        # Call aria2 to download in batch
if download_url != -1:
            # There may be more than one video per day
for i in range(len(download_url)):
                # Because this runs through the command line, the name must not contain bare spaces, so wrap it in ""
mypy.download.aria2(name='"'+filename[i]+download_url[i][-4:]+'"', url=download_url[i],
aria2path=self.aria2path, referer="http://ykt.zenking.cc")
def get_video_real_name_and_url(self, html_url, class_):
        # BeautifulSoup is used because its find_all function is very handy; the pattern below was found by inspecting the page and is used to scrape the info
html = bs(requests.get(url=html_url, headers=self.headers).text, 'html.parser')
video = re.findall(r"getPlayerHtml\((.+?)\)",
str(html.find_all("a", href="javascript:void(0)")))
        # The list above holds all videos; this one keeps only those matching the grade and today's date
video_class_today = []
        # Note that find() returns the character position, or -1 when absent, rather than True or False
for i in video:
if str(i).find(self.time_without_zero) != -1:
if str(i).find(class_) != -1:
video_class_today.append(i)
        # Careful: this must NOT be written as a = b = [] (the lists would be shared)!!!
kpointIds = []
file_name = []
urls = []
        # Extract kpointIds and file_name
for i in video_class_today:
a = str(i).split(',')
kpointIds.append(a[0])
            # The name looks like '2月26日\t高一\t数学期末试题评讲', so the \t and ' characters have to be removed
            # Also note that str.replace is implemented in C, so keyword arguments are not accepted, only positional ones; the \t here needs no extra escaping
file_name.append(a[2].replace('\t', '').replace("'", ''))
        # With the ids in hand, fetch the real video links
for kpointId in kpointIds:
data = (('kpointId', kpointId), ('playFromType', '2'))
            # A regular expression would be nicer here, but I am not very good at them
u = str(bs(requests.post(url="http://ykt.zenking.cc/front/ajax/checkKpoint", data=data,
headers=self.video_headers).text, 'html.parser'))
            # Check whether the video has expired
if u.find('.mp4') != -1:
urls.append(u[u.find('src=')+5:u.find('.mp4')] + '.mp4')
elif u.find('courseKpoint/pdf/') != -1:
urls.append(u[u.find('http://ykt.zenking.cc/images/upload/courseKpoint/pdf/'):u.find('.pdf')] + '.pdf')
else:
return -1,-1
return file_name, urls
if __name__ == '__main__':
a = dict(语文='193', 高一二数学='205', 英语='196', 物理='200', 化学='201', 生物='199',
历史='197', 政治='202', 地理='198', 团课='204', 体育='210', 音乐='208', 心理='203', 美术='209')
num = []
for i in a.keys():
num.append(a.get(i))
spider = ykt(r'.\lib\data')
print('高一')
spider.main(choice_num=num, class_='高一')
print('高三')
gaosan('所有', '高三理科', 'data', r'.\lib\aria2c.exe')
``` |
{
"source": "jifflund/lab2d",
"score": 3
} |
#### File: lab2d/dmlab2d/random_agent.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import numpy as np
import pygame
import dmlab2d
from dmlab2d import runfiles_helper
def _make_int32_distribution(random, minimum, maximum):
def function():
return random.randint(minimum, maximum + 1)
return function
def _make_float64_distribution(random, minimum, maximum):
def function():
return random.uniform(minimum, maximum)
return function
class PyGameRandomAgent(object):
"""Random agent works with int32 or float64 bounded actions."""
def __init__(self, action_spec, observation_name, observation_spec, seed,
scale):
"""Create a PyGame agent.
Args:
action_spec: Environment action spec used to generate random actions.
observation_name: Name of observation to render each frame.
observation_spec: Environment observation spec for creating PyGame window.
seed: Agent seed used for generating random actions.
scale: Scales screen.
"""
self._observation_name = observation_name
random = np.random.RandomState(seed)
self._actions = []
self._scores = []
self._scale = scale
for name, spec in action_spec.items():
if spec.dtype == np.dtype('int32'):
self._actions.append(
(name, _make_int32_distribution(random, spec.minimum,
spec.maximum)))
elif spec.dtype == np.dtype('float64'):
self._actions.append(
(name, _make_float64_distribution(random, spec.minimum,
spec.maximum)))
else:
print("Warning '{}' is not supported".format(spec))
obs_spec = observation_spec[observation_name]
self._setup_py_game(obs_spec.shape)
def _setup_py_game(self, shape):
pygame.init()
pygame.display.set_caption('DM Lab2d')
self._game_display = pygame.display.set_mode(
(int(shape[1] * self._scale), int(shape[0] * self._scale)))
def _render_observation(self, observation):
obs = np.transpose(observation, (1, 0, 2))
surface = pygame.surfarray.make_surface(obs)
rect = surface.get_rect()
surf = pygame.transform.scale(
surface, (int(rect[2] * self._scale), int(rect[3] * self._scale)))
self._game_display.blit(surf, dest=(0, 0))
pygame.display.update()
def step(self, timestep):
"""Renders timestep and returns random actions according to spec."""
self._render_observation(timestep.observation[self._observation_name])
display_score_dirty = False
if timestep.reward is not None:
if timestep.reward != 0:
self._scores[-1] += timestep.reward
display_score_dirty = True
else:
self._scores.append(0)
display_score_dirty = True
if display_score_dirty:
pygame.display.set_caption('%d score' % self._scores[-1])
return {name: gen() for name, gen in self._actions}
def print_stats(self):
print('Scores: ' + ', '.join(str(score) for score in self._scores))
def _create_environment(args):
"""Creates an environment.
Args:
args: See `main()` for description of args.
Returns:
dmlab2d.Environment with one observation.
"""
args.settings['levelName'] = args.level_name
lab2d = dmlab2d.Lab2d(runfiles_helper.find(), args.settings)
return dmlab2d.Environment(lab2d, [args.observation], args.env_seed)
def _run(args):
"""Runs a random agent against an environment rendering the results.
Args:
args: See `main()` for description of args.
"""
env = _create_environment(args)
agent = PyGameRandomAgent(env.action_spec(), args.observation,
env.observation_spec(), args.agent_seed, args.scale)
for _ in range(args.num_episodes):
timestep = env.reset()
# Run single episode.
while True:
# Query PyGame for early termination.
if any(event.type == pygame.QUIT for event in pygame.event.get()):
print('Exit early last score may be truncated:')
agent.print_stats()
return
action = agent.step(timestep)
timestep = env.step(action)
if timestep.last():
# Observe last frame of episode.
agent.step(timestep)
break
# All episodes completed, report per episode.
agent.print_stats()
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--level_name', type=str, default='clean_up', help='Level name to load')
parser.add_argument(
'--observation',
type=str,
default='WORLD.RGB',
help='Observation to render')
parser.add_argument(
'--settings', type=json.loads, default={}, help='Settings as JSON string')
parser.add_argument(
'--env_seed', type=int, default=0, help='Environment seed')
parser.add_argument('--agent_seed', type=int, default=0, help='Agent seed')
parser.add_argument(
'--num_episodes', type=int, default=1, help='Number of episodes')
parser.add_argument(
'--scale', type=float, default=1, help='Scale to render screen')
args = parser.parse_args()
_run(args)
if __name__ == '__main__':
main()
``` |
{
"source": "jiffyclub/regexmagic",
"score": 3
} |
#### File: jiffyclub/regexmagic/regexmagic.py
```python
import re
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.display import display, HTML
MATCH_TEMPL = '<font color="{0}"><u>{1}</u></font>'
PATTERN_TEMPL = '<font color="green"><strong>{0}</strong></font>\n'
@magics_class
class RegexMagic(Magics):
'''Provide the 'regex' calling point for the magic, and keep track of
alternating colors while matching.'''
this_color, next_color = 'red', 'blue'
@cell_magic
def regex(self, pattern, text):
pattern_str = PATTERN_TEMPL.format(pattern)
result_str = [self.handle_line(pattern, line) for line in text.split('\n')]
display(HTML(pattern_str + '\n'.join(result_str)))
def handle_line(self, pattern, line):
result = []
m = re.search(pattern, line)
while m:
result.append(line[:m.start()])
result.append(MATCH_TEMPL.format(self.this_color, line[m.start():m.end()]))
self.this_color, self.next_color = self.next_color, self.this_color
line = line[m.end():]
m = re.search(pattern, line)
result.append(line)
return '<br/>{0}'.format(''.join(result))
def load_ipython_extension(ipython):
ipython.register_magics(RegexMagic)
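# Example usage in a notebook (illustrative, assuming this module is importable as
# 'regexmagic'):
#   %load_ext regexmagic
#   %%regex [a-z]+\d
#   first line abc1 def
#   second line xyz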
``` |
{
"source": "jiffyclub/scipy",
"score": 2
} |
#### File: scipy/tools/refguide_check.py
```python
import sys
import re
import copy
import inspect
from argparse import ArgumentParser, REMAINDER
import scipy
from scipy import (cluster, constants, fftpack, integrate, interpolate, io,
linalg, misc, ndimage, odr, optimize, signal, sparse,
spatial, special, stats)
# TODO: sparse.csgraph, sparse.linalg, stats.mstats, cluster.vq,
# cluster.hierarchy
def find_funcnames(module):
funcnames = set()
# 3 spaces followed by function name; only function names listed in
# refguide are indented like this (mostly, there may be some false
# positives)
    pattern = re.compile(r"(\s\s\s[a-z_0-9A-Z]+)")
for line in module.__doc__.splitlines():
res = re.search(pattern, line)
if res is not None:
funcname = res.groups()[0].lstrip()
funcnames.add(funcname)
return funcnames
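# Small self-contained illustration (not part of the original script) of the
# three-space heuristic above: only indented names in a toy docstring are collected.
def _heuristic_example():
    sample = "Routines\n   cdist\nnot a function\n   pdist\n"
    names = set()
    for line in sample.splitlines():
        res = re.search(r"(\s\s\s[a-z_0-9A-Z]+)", line)
        if res is not None:
            names.add(res.groups()[0].lstrip())
    return names  # {'cdist', 'pdist'}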
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
all = copy.deepcopy(module.__all__)
for name in ['absolute_import', 'division', 'print_function']:
try:
all.remove(name)
except ValueError:
pass
# somehow some modules survive the first iteration (?)
for _ in range(2):
for name in all:
if inspect.ismodule(getattr(module, name)):
all.remove(name)
return all
def compare(all, funcnames):
"""Return sets of objects only in one of __all__, refguide."""
only_all = set()
for name in all:
if name not in funcnames:
only_all.add(name)
only_ref = set()
for name in funcnames:
if name not in all:
only_ref.add(name)
return only_all, only_ref
def report(all, funcnames, module_name):
"""Print out a report for the module"""
num_all = len(all)
num_ref = len(funcnames)
print("Number of functions in __all__: %i" % num_all)
print("Number of functions in refguide: %i" % num_ref)
only_all, only_ref = compare(all, funcnames)
if len(only_all) == len(only_ref) == 0:
print("\nAll good!")
else:
if len(only_all) > 0:
print("")
print("Objects in %s.__all__ but not in refguide:" % module_name)
print("------------------------------------------")
for name in only_all:
print(name)
if len(only_ref) > 0:
print("")
print("Objects in refguide but not in %s.__all__:" % module_name)
print("------------------------------------------")
for name in only_ref:
print(name)
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_name", metavar="ARGS", default=[],
nargs=REMAINDER, help="Valid Scipy submodule name")
args = parser.parse_args(argv)
module_name = args.module_name[0]
module = getattr(scipy, module_name)
funcnames = find_funcnames(module)
all = get_all_dict(module)
report(all, funcnames, module_name)
if __name__ == '__main__':
main(argv=sys.argv[1:])
``` |
{
"source": "jiforcen/orderedweightedpooling",
"score": 2
} |
#### File: orderedweightedpooling/pooling/legacy_sort.py
```python
import numpy as np
import tensorflow as tf
def sort_p2x2(x):
_, pool_height, pool_width, channels, elems = x.get_shape().as_list()
x = tf.reshape(x, [-1, elems])
rows, _ = x.get_shape().as_list()
# 1st stage
x_1 = tf.slice(x, [0, 0], [-1, 1])
x_2 = tf.slice(x, [0, 1], [-1, 1])
x_3 = tf.slice(x, [0, 2], [-1, 1])
x_4 = tf.slice(x, [0, 3], [-1, 1])
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4], axis=1))
# 2nd stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4], axis=1))
# 3rd stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4], axis=1))
x = tf.reshape(x, [-1, pool_height, pool_width, channels, elems]) # Reshape tensor
return x
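# Added note: sort_p2x2 above and sort_p3x3 below are hand-unrolled descending
# bubble-sort networks over the last axis. Each "stage" is one bubble pass built from
# element-wise tf.greater/tf.where swaps, so the whole sort stays inside the
# TensorFlow graph; 4 values need 3 passes, 9 values need 8.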
def sort_p3x3(x):
_, pool_height, pool_width, channels, elems = x.get_shape().as_list()
x = tf.reshape(x, [-1, elems])
rows, _ = x.get_shape().as_list()
# 1st stage
x_1 = tf.slice(x, [0, 0], [-1, 1])
x_2 = tf.slice(x, [0, 1], [-1, 1])
x_3 = tf.slice(x, [0, 2], [-1, 1])
x_4 = tf.slice(x, [0, 3], [-1, 1])
x_5 = tf.slice(x, [0, 4], [-1, 1])
x_6 = tf.slice(x, [0, 5], [-1, 1])
x_7 = tf.slice(x, [0, 6], [-1, 1])
x_8 = tf.slice(x, [0, 7], [-1, 1])
x_9 = tf.slice(x, [0, 8], [-1, 1])
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x_45_greater = tf.greater(x_4, x_5)
x_aux = tf.where(x_45_greater, x_4, x_5)
x_5 = tf.where(tf.logical_not(x_45_greater), x_4, x_5)
x_4 = x_aux
x_56_greater = tf.greater(x_5, x_6)
x_aux = tf.where(x_56_greater, x_5, x_6)
x_6 = tf.where(tf.logical_not(x_56_greater), x_5, x_6)
x_5 = x_aux
x_67_greater = tf.greater(x_6, x_7)
x_aux = tf.where(x_67_greater, x_6, x_7)
x_7 = tf.where(tf.logical_not(x_67_greater), x_6, x_7)
x_6 = x_aux
x_78_greater = tf.greater(x_7, x_8)
x_aux = tf.where(x_78_greater, x_7, x_8)
x_8 = tf.where(tf.logical_not(x_78_greater), x_7, x_8)
x_7 = x_aux
x_89_greater = tf.greater(x_8, x_9)
x_aux = tf.where(x_89_greater, x_8, x_9)
x_9 = tf.where(tf.logical_not(x_89_greater), x_8, x_9)
x_8 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 2nd stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x_45_greater = tf.greater(x_4, x_5)
x_aux = tf.where(x_45_greater, x_4, x_5)
x_5 = tf.where(tf.logical_not(x_45_greater), x_4, x_5)
x_4 = x_aux
x_56_greater = tf.greater(x_5, x_6)
x_aux = tf.where(x_56_greater, x_5, x_6)
x_6 = tf.where(tf.logical_not(x_56_greater), x_5, x_6)
x_5 = x_aux
x_67_greater = tf.greater(x_6, x_7)
x_aux = tf.where(x_67_greater, x_6, x_7)
x_7 = tf.where(tf.logical_not(x_67_greater), x_6, x_7)
x_6 = x_aux
x_78_greater = tf.greater(x_7, x_8)
x_aux = tf.where(x_78_greater, x_7, x_8)
x_8 = tf.where(tf.logical_not(x_78_greater), x_7, x_8)
x_7 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 3rd stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x_45_greater = tf.greater(x_4, x_5)
x_aux = tf.where(x_45_greater, x_4, x_5)
x_5 = tf.where(tf.logical_not(x_45_greater), x_4, x_5)
x_4 = x_aux
x_56_greater = tf.greater(x_5, x_6)
x_aux = tf.where(x_56_greater, x_5, x_6)
x_6 = tf.where(tf.logical_not(x_56_greater), x_5, x_6)
x_5 = x_aux
x_67_greater = tf.greater(x_6, x_7)
x_aux = tf.where(x_67_greater, x_6, x_7)
x_7 = tf.where(tf.logical_not(x_67_greater), x_6, x_7)
x_6 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 4th stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x_45_greater = tf.greater(x_4, x_5)
x_aux = tf.where(x_45_greater, x_4, x_5)
x_5 = tf.where(tf.logical_not(x_45_greater), x_4, x_5)
x_4 = x_aux
x_56_greater = tf.greater(x_5, x_6)
x_aux = tf.where(x_56_greater, x_5, x_6)
x_6 = tf.where(tf.logical_not(x_56_greater), x_5, x_6)
x_5 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 5th stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x_45_greater = tf.greater(x_4, x_5)
x_aux = tf.where(x_45_greater, x_4, x_5)
x_5 = tf.where(tf.logical_not(x_45_greater), x_4, x_5)
x_4 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 6th stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x_34_greater = tf.greater(x_3, x_4)
x_aux = tf.where(x_34_greater, x_3, x_4)
x_4 = tf.where(tf.logical_not(x_34_greater), x_3, x_4)
x_3 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 7th stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x_23_greater = tf.greater(x_2, x_3)
x_aux = tf.where(x_23_greater, x_2, x_3)
x_3 = tf.where(tf.logical_not(x_23_greater), x_2, x_3)
x_2 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# 8th stage
x_12_greater = tf.greater(x_1, x_2)
x_aux = tf.where(x_12_greater, x_1, x_2)
x_2 = tf.where(tf.logical_not(x_12_greater),x_1, x_2)
x_1 = x_aux
x = tf.squeeze(tf.stack([x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9], axis=1))
# Reshape
x = tf.reshape(x, [-1, pool_height, pool_width, channels, elems]) # Reshape tensor
return x
```
#### File: jiforcen/orderedweightedpooling/tensor.py
```python
import tensorflow as tf
from keras import backend as K
import numpy as np
def ow2_reg_un(weight_matrix, a1, a2):
return (a1 * K.sum(K.abs(1-K.sum(weight_matrix, axis=1))) +
a2 * K.sum(-tf.minimum(0.0, weight_matrix)))
def ow2_reg_uns(weight_matrix, a1, a2, a3):
result = weight_matrix - tf.manip.roll(weight_matrix, shift=1, axis=1)
shape = tf.shape(weight_matrix)
    result = tf.reduce_sum(tf.square(tf.slice(result, [0, 1], [shape[0], shape[-1] - 1])))
return (a3 * result + ow2_reg_un(weight_matrix, a1, a2))
not_equal_weights_ow2 = tf.constant(np.array([[0.25, -0.25, 0.25, 0.25],
[0.25, 0.25, -0.25, 0.25]]),
dtype=tf.float32)
reg_op = ow2_reg_uns(not_equal_weights_ow2, 0, 0, 1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
print (reg_val)
```
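`ow2_reg_uns` combines three penalties: rows of the weight matrix should sum to one, weights should be non-negative, and neighboring (sorted-position) weights should not differ abruptly. A hedged NumPy re-computation of the same three terms for the constants used above (illustrative only; the TensorFlow version remains the reference):

```python
import numpy as np

def ow2_reg_uns_np(w, a1, a2, a3):
    unit_sum = a1 * np.sum(np.abs(1 - np.sum(w, axis=1)))     # each row should sum to 1
    negativity = a2 * np.sum(-np.minimum(0.0, w))              # weights should be >= 0
    smoothness = a3 * np.sum(np.square(np.diff(w, axis=1)))    # adjacent weights should be close
    return unit_sum + negativity + smoothness

w = np.array([[0.25, -0.25, 0.25, 0.25],
              [0.25, 0.25, -0.25, 0.25]])
print(ow2_reg_uns_np(w, 0, 0, 1))  # 1.0 -- only the smoothness term is active, as in the session above
```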
#### File: orderedweightedpooling/test/test_regularizers.py
```python
import unittest
import numpy as np
import keras
from keras.initializers import Constant
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
Input,
Dense,
Flatten,
Activation,
)
from keras.models import Model
from pooling.pooling_layers import (
OW1Pooling2D,
OW2Pooling2D,
OW3Pooling2D,
)
from pooling.ow_regularizers import (
ow1_reg_un,
ow1_reg_uns,
ow2_reg_un,
ow2_reg_uns,
)
import tensorflow as tf
class Test_PosUnitConstraint(unittest.TestCase):
""" Test OW1 Regularizers"""
def setUp(self):
self.x_input = np.random.rand(10, 4, 4, 2)
self.input_tensor = Input(shape=self.x_input.shape[1:])
self.y = np.array([0, 1]).reshape(1,2)
self.y = np.ones((10, 2))
self.pool_size = (4, 4)
self.optimizer = keras.optimizers.Adam(lr=.01)
def test_ow1_regularizer(self):
""" Test ow-pool with mean weights"""
def regularizer(weight_matrix):
return ow1_reg_un(weight_matrix, .1, .1)
neg_ones_ini = -np.ones(self.pool_size[0] * self.pool_size[1])/2
w_initializer = Constant(value=neg_ones_ini)
x = OW1Pooling2D(pool_size=self.pool_size, name='ow', padding='same',
weights_regularizer=regularizer,
weights_initializer=w_initializer)(self.input_tensor)
x = Flatten()(x)
x = Dense(10)(x)
x = Dense(2)(x)
x = Activation('softmax')(x)
ow_model = Model(self.input_tensor, x)
ow_model.compile(optimizer=self.optimizer, loss='categorical_crossentropy',
metrics=['accuracy'])
ow_layer = ow_model.layers[-5]
ow_weights = ow_layer.get_weights()[0]
ow_model.fit(self.x_input, self.y, epochs=1000, verbose=0, batch_size=2)
ow_layer = ow_model.layers[-5]
ow_weights = ow_layer.get_weights()[0]
np.testing.assert_array_almost_equal(np.sum(ow_weights), 1, decimal=2)
self.assertFalse(np.sum(ow_weights<0))
class Test_RegOwa1(unittest.TestCase):
""" Test OW1 Regularizers"""
def setUp(self):
self.equal_weights_ow1 = tf.constant(np.array([0.25, 0.25, 0.25, 0.25]),
dtype=tf.float32)
self.not_equal_weights_ow1 = tf.constant(np.array([0.25, -0.25, 0.25, 0.25]),
dtype=tf.float32)
def test_ow1_regularizer_un_case_0(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_un(self.equal_weights_ow1, .1, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow1_regularizer_un_case_1(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_un(self.not_equal_weights_ow1, .1, 0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
def test_ow1_regularizer_un_case_2(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_un(self.not_equal_weights_ow1, 0, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
def test_ow1_regularizer_uns_case_0(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_uns(self.equal_weights_ow1, .1, .1, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow1_regularizer_uns_case_1(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_uns(self.not_equal_weights_ow1, 0, 0, 0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow1_regularizer_uns_case_2(self):
""" Test ow1 regularizer"""
reg_op = ow1_reg_uns(self.not_equal_weights_ow1, 0, 0, 1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
class Test_RegOwa2(unittest.TestCase):
""" Test OW2 Regularizers"""
def setUp(self):
self.equal_weights_ow2 = tf.constant(np.array([[0.25, 0.25, 0.25, 0.25],
[0.25, 0.25, 0.25, 0.25]]),
dtype=tf.float32)
self.not_equal_weights_ow2 = tf.constant(np.array([[0.25, -0.25, 0.25, 0.25],
[0.25, -0.25, 0.25, 0.25]]),
dtype=tf.float32)
def test_ow2_regularizer_un_case_0(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_un(self.equal_weights_ow2, .1, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow2_regularizer_un_case_1(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_un(self.not_equal_weights_ow2, .1, 0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
def test_ow2_regularizer_un_case_2(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_un(self.not_equal_weights_ow2, 0, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
def test_ow2_regularizer_uns_case_0(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_uns(self.equal_weights_ow2, .1, .1, .1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow2_regularizer_uns_case_1(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_uns(self.not_equal_weights_ow2, 0, 0, 0)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertEqual(reg_val, 0)
def test_ow2_regularizer_uns_case_2(self):
""" Test OW2 Regularizers"""
reg_op = ow2_reg_uns(self.not_equal_weights_ow2, 0, 0, 1)
init_op = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init_op)
reg_val = sess.run(reg_op)
self.assertNotEqual(reg_val, 0)
``` |
{
"source": "jifox/nautobot-plugin-secrets-providers",
"score": 2
} |
#### File: nautobot_secrets_providers/providers/aws.py
```python
import base64
import json
try:
import boto3
from botocore.exceptions import ClientError
except (ImportError, ModuleNotFoundError):
boto3 = None
from django import forms
from nautobot.utilities.forms import BootstrapMixin
from nautobot.extras.secrets import exceptions, SecretsProvider
__all__ = ("AWSSecretsManagerSecretsProvider",)
class AWSSecretsManagerSecretsProvider(SecretsProvider):
"""A secrets provider for AWS Secrets Manager."""
slug = "aws-secrets-manager"
name = "AWS Secrets Manager"
is_available = boto3 is not None
class ParametersForm(BootstrapMixin, forms.Form):
"""Required parameters for AWS Secrets Manager."""
name = forms.CharField(
required=True,
help_text="The name of the AWS Secrets Manager secret",
)
region = forms.CharField(
required=True,
help_text="The region name of the AWS Secrets Manager secret",
)
key = forms.CharField(
required=True,
help_text="The key of the AWS Secrets Manager secret",
)
@classmethod
def get_value_for_secret(cls, secret, obj=None, **kwargs):
"""Return the secret value by name and region."""
# Extract the parameters from the Secret.
parameters = secret.rendered_parameters(obj=obj)
secret_name = parameters.get("name")
secret_key = parameters.get("key")
region_name = parameters.get("region")
# Create a Secrets Manager client.
session = boto3.session.Session()
client = session.client(service_name="secretsmanager", region_name=region_name)
# This is based on sample code to only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(SecretId=secret_name)
except ClientError as err:
if err.response["Error"]["Code"] == "DecryptionFailureException": # pylint: disable=no-else-raise
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InternalServiceErrorException":
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InvalidParameterException":
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretParametersError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "InvalidRequestException":
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretProviderError(secret, cls, str(err))
elif err.response["Error"]["Code"] == "ResourceNotFoundException":
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise exceptions.SecretValueNotFoundError(secret, cls, str(err))
else:
# Decrypts secret using the associated KMS CMK.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if "SecretString" in get_secret_value_response:
secret_value = get_secret_value_response["SecretString"]
else:
# TODO(jathan): Do we care about this? Let's figure out what to do about a binary value?
secret_value = base64.b64decode(get_secret_value_response["SecretBinary"]) # noqa
# If we get this far it should be valid JSON.
data = json.loads(secret_value)
# Retrieve the value using the key or complain loudly.
try:
return data[secret_key]
except KeyError as err:
msg = f"The secret value could not be retrieved using key {err}"
raise exceptions.SecretValueNotFoundError(secret, cls, msg) from err
```
#### File: nautobot-plugin-secrets-providers/nautobot_secrets_providers/views.py
```python
from django.views.generic import TemplateView
from nautobot_secrets_providers import secrets
class SecretsProvidersHomeView(TemplateView):
"""Plugin home page for Secrets Providers."""
template_name = "nautobot_secrets_providers/home.html"
def get_context_data(self, **kwargs):
"""Inject `secrets_providers` into template context."""
ctx = super().get_context_data(**kwargs)
ctx["secrets_providers"] = secrets.secrets_providers
return ctx
``` |
{
"source": "jifox/nornir_rich",
"score": 2
} |
#### File: nornir_rich/tests/test.py
```python
from nornir_rich.plugins.functions import RichResults
from nornir import InitNornir
from nornir.core.task import Result
from nornir_utils.plugins.tasks.data import echo_data
nr = InitNornir()
rr = RichResults()
def stuff(task):
return Result(
host=task.host, exception=Exception("testing"), stdout=task, result=[1, 2, 3]
)
def level1(task):
task.run(level2)
def level2(task):
task.run(level3)
def level3(task):
return Result(host=task.host, stdout="abcd", result={"x": 9, "y": 10})
def gen_exception(task):
raise Exception('this is an exception')
rr.print(nr.run(task=echo_data, x=10, z=[1, 2, 3]))
rr.print(nr.run(task=stuff), vars=["stdout", "result", "exception"])
rr.print(nr.run(task=level1), vars=["stdout", "result", "exception"])
rr.print(nr.run(task=gen_exception), vars=["stdout", "result", "exception"])
``` |
{
"source": "jifox/relpath",
"score": 4
} |
#### File: relpath/relpath/__init__.py
```python
import os
from pathlib import Path
from typing import Union
def relative_path(base_path: Union[str, Path], rel_path: Union[str, Path]) -> str:
"""Returns the relative path from base path to rel_path
even when rel_path is not a subdirectory of base_path.
Args:
base_path (str|Path): Base for relative path
rel_path (str|Path): File or Directory that the relative path points to.
Returns:
str: relative path to access 'rel_path' from 'base_path'
"""
base = Path(base_path).absolute()
rel = Path(rel_path).absolute()
if not base.is_dir():
base = base.parent
common = ""
idx = 0
while (idx < min(len(base.parts), len(rel.parts))) and (
str(base.parts[idx]) == str(rel.parts[idx])
):
if len(common) > 0 and (common[-1] != os.sep):
common += os.sep
common += base.parts[idx]
idx += 1
diff_len = len(base.parts) - len(Path(common).parts)
res = ""
for dummy in range(0, diff_len):
res += ".." + os.sep
ofs = 0
if str(rel) == str(common):
return res
if len(res) > 0 and res[-1] == os.sep and str(rel)[len(str(common))] == os.sep:
ofs = 1
elif len(res) == 0:
ofs = 1
res += str(rel)[len(str(common)) + ofs :]
return res
```
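A short usage sketch of `relative_path` (the paths are made-up examples; the import mirrors the test file below):

```python
from pathlib import Path
from relpath import relative_path

base = Path.cwd()                           # an existing directory
target = base.parent / "data" / "file.csv"  # a sibling of the current directory
print(relative_path(base, target))          # e.g. '../data/file.csv' on POSIX systems
```

Note that a base path that does not exist on disk fails the `is_dir()` check and is treated as a file path, so its parent directory is used as the starting point.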
#### File: relpath/tests/test_relpath.py
```python
from pathlib import Path
import sys
sys.path.append(str(Path(Path(__file__).parent).parent))
import pytest
import os
from relpath import relative_path
def test_relative_path():
testdir = str(Path(__file__).parent) + os.sep
testdata = [
{
"bas": str(Path(testdir).joinpath("dir1", "dir2")),
"rel": str(Path(testdir).joinpath("dir4", "test.txt")),
"res": str(Path("..").joinpath("..", "dir4", "test.txt")),
},
{
"bas": testdir,
"rel": str(Path(testdir).joinpath("dir4", "test.txt")),
"res": str(Path("dir4").joinpath("test.txt")),
},
{
"bas": str(Path(testdir).joinpath("dir1", "non-existing-filename")),
"rel": str(Path(testdir).joinpath("dir4", "test.txt")),
"res": str(Path("..").joinpath("dir4", "test.txt")),
},
{
"bas": "/home",
"rel": "/",
"res": "../",
},
]
for dat in testdata:
print("")
print("bas:", dat.get("bas"))
print("rel:", dat.get("rel"))
print("res:", dat.get("res"))
res = relative_path(dat.get("bas"), dat.get("rel"))
assert str(res) == str(dat.get("res"))
``` |
{
"source": "jifox/secret-server-reader",
"score": 2
} |
#### File: secread/tests/test_secread.py
```python
import os
import pytest
from secread import __version__, SecretServer
def test_version():
assert __version__ == "0.1.4"
@pytest.fixture
def sec_server():
return SecretServer()
def test_default_slugs_is_a_list(sec_server: SecretServer):
slugs = sec_server.SECRET_SERVER_DEFAULT_SLUGS
assert isinstance(slugs, list)
def test_secretserver(sec_server: SecretServer):
token = sec_server.getAuthToken()
assert len(token) > 0, "Token could not be read"
def test_get_secret_response_by_name(sec_server: SecretServer):
secname = os.getenv("TEST_SECRET_NAME", "GitLab Token netsearch-ro")
res = sec_server.searchSecretResponse(secname)
fields = sec_server.getFieldItemWithSlug(res)
assert "username" in fields.keys(), "Missing username"
assert "password" in fields.keys(), "Missing password"
def test_get_secret_by_name(sec_server: SecretServer):
secname = os.getenv("TEST_SECRET_NAME", "GitLab Token netsearch-ro")
res = sec_server.searchSecret(secname)
assert "username" in res.keys(), "Missing username"
assert "password" in res.keys(), "Missing password"
```
#### File: secret-server-reader/secread/thycotic.py
```python
from typing import Any, Dict, List, Optional, Union
from pathlib import Path
import json
import os
import requests
from dotenv import load_dotenv
from requests.models import Response
def strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
credits: https://github.com/nautobot
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return 1
elif val in ("n", "no", "f", "false", "off", "0"):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
def is_truthy(arg) -> bool:
"""Convert "truthy" strings into Booleans.
Examples:
>>> is_truthy('yes')
True
Args:
arg (str): Truthy string (True values are y, yes, t, true, on and 1; false values are n, no,
f, false, off and 0. Raises ValueError if val is anything else.
credits: https://github.com/nautobot
"""
if isinstance(arg, bool):
return arg
return bool(strtobool(arg))
class SecretServer:
# see .env.example
SECRET_SERVER_SITE: str
SECRET_SERVER_AUTH_API: str
SECRET_SERVER_USERNAME: str
SECRET_SERVER_PASSWORD: str
SECRET_SERVER_SSL_VERIFY: Union[bool, str]
SECRET_SERVER_API: str
SECRET_SERVER_DEFAULT_SLUGS: List[str] = ["id", "url", "username", "password"]
SECRET_SERVER_IS_DUMMY: bool
SECRET_SERVER_TEST_DUMMY_RESULT: Dict[Any, Any]
def __init__(self) -> None:
load_dotenv(".env")
self.SECRET_SERVER_SITE = os.getenv("SECRET_SERVER_SITE", "https://pw.example.local/SecretServer")
self.SECRET_SERVER_AUTH_API = os.getenv("SECRET_SERVER_AUTH_API", "/oauth2/token")
self.SECRET_SERVER_USERNAME = os.getenv("SECRET_SERVER_USERNAME", "thycotic_api_username")
self.SECRET_SERVER_PASSWORD = os.getenv("SECRET_SERVER_PASSWORD", "<PASSWORD>")
# SECRET_SERVER_SSL_VERIFY
# values:
# - True: certificate will be verified (Default)
# - False: certificate will be ignored
# - Path: path to trusted certificat bundle e.g. "/etc/ssl/certs/ca-bundle.crt"
ssl_verify = os.getenv("SECRET_SERVER_SSL_VERIFY", "True")
if Path(ssl_verify).exists():
self.SECRET_SERVER_SSL_VERIFY = ssl_verify
else:
self.SECRET_SERVER_SSL_VERIFY = is_truthy(ssl_verify)
        slugliststr = os.getenv("SECRET_SERVER_DEFAULT_SLUGS", "")
        if len(slugliststr) > 0:
            try:
                self.SECRET_SERVER_DEFAULT_SLUGS = json.loads(slugliststr)
            except ValueError:
                # keep the built-in default slug list if the env variable is not valid JSON
                pass
self.SECRET_SERVER_API = self.SECRET_SERVER_SITE + "/api/v1"
self.SECRET_SERVER_IS_DUMMY = is_truthy(os.getenv("SECRET_SERVER_IS_DUMMY", "False"))
        try:
            dummyres = json.loads(str(os.getenv("SECRET_SERVER_TEST_DUMMY_RESULT")))
        except ValueError:
            # fall back to the built-in dummy credentials below
            dummyres = None
if not dummyres or ("username" not in dummyres) or ("password" not in dummyres) or ("url" not in dummyres):
dummyres = {
"username": "testuser",
"password": "<PASSWORD>",
"url": "https://localhost/SecretServer",
}
self.SECRET_SERVER_TEST_DUMMY_RESULT = dummyres
self._isconnected = False
self.token = None
def getAuthToken(self):
"""Get token with given credentials"""
if self.SECRET_SERVER_IS_DUMMY:
return "DUMMY_TOKEN"
creds = {}
creds["username"] = self.SECRET_SERVER_USERNAME
creds["password"] = self.SECRET_SERVER_PASSWORD
creds["grant_type"] = "password"
uri = self.SECRET_SERVER_SITE + self.SECRET_SERVER_AUTH_API
headers = {
"Accept": "application/json",
"content-type": "application/x-www-form-urlencoded",
}
resp = requests.post(
uri,
data=creds,
headers=headers,
verify=self.SECRET_SERVER_SSL_VERIFY,
)
if resp.status_code not in (200, 304):
raise Exception(
"Problems getting a token from Secret Server for %s. %s %s"
% (self.SECRET_SERVER_USERNAME, resp.status_code, resp)
)
self.token = resp.json()["access_token"]
self._isconnected = True
return self.token
def getSecret(self, secretId: str):
"""Retrieve the infomation about the secret having id==secretid
Args:
secretId (str): Entry ID to retrieve fron server
Raises:
Exception: REST Api Call failed
Returns:
[Response.json()]: Answer from server
"""
if self.SECRET_SERVER_IS_DUMMY:
return {
"id": secretId,
"name": "DUMMY Secret",
"items": [
{
"itemId": 18582,
"itemValue": "https://example.com/net-automation/inventory.git",
"slug": "url",
},
{
"itemId": 18583,
"itemValue": self.SECRET_SERVER_TEST_DUMMY_RESULT["username"],
"slug": "username",
},
{
"itemId": 18584,
"itemValue": self.SECRET_SERVER_TEST_DUMMY_RESULT["password"],
"slug": "password",
},
],
}
if not self._isconnected:
self.getAuthToken()
headers = {
"Authorization": f"Bearer {self.token}",
"content-type": "application/json",
}
srv_response = requests.get(
self.SECRET_SERVER_API + f"/secrets/{secretId}",
headers=headers,
verify=self.SECRET_SERVER_SSL_VERIFY,
)
if srv_response.status_code not in (200, 304):
self._isconnected = False
raise Exception(f"Error retrieving Secret. {srv_response.status_code} {srv_response}")
self._response = srv_response.json()
return self._response
def getFieldItemWithSlug(self, response, slugs: Optional[List[str]] = None) -> Dict[str, Any]:
"""Return the field values from the fields selected in list[slugs]
Args:
response (Response.json()): response from secretserver
slugs ([str], optional): Slugs to extract. When None, the default is used. Defaults to None.
Returns:
[dict]: Fieldname and Value
"""
result = {}
if slugs is None:
slugs = self.SECRET_SERVER_DEFAULT_SLUGS
elif isinstance(slugs, str):
slugs = json.loads(slugs)
if "id" in slugs: # type: ignore
result = {"id": response["id"]}
if "name" in slugs: # type: ignore
result.update({"name": response.name})
for field in response["items"]:
if field["slug"] in slugs:
result.update({field["slug"]: field["itemValue"]})
return result
def searchSecretResponse(self, text, slugs: Optional[List[str]] = None):
"""Search the secret name and return
Args:
text ([type]): [description]
slugs ([type], optional): [description]. Defaults to None.
Raises:
Exception: Error retrieving Secret.
Returns:
[dict]: Extracted Secret field-names and values
"""
if not slugs:
slugs = self.SECRET_SERVER_DEFAULT_SLUGS
if isinstance(slugs, str):
slugs = json.loads(slugs)
if not self._isconnected:
self.getAuthToken()
headers = {
"Authorization": f"Bearer {self.token}",
"content-type": "application/json",
"filter.searchField": text,
}
if self.SECRET_SERVER_IS_DUMMY:
            with Path(__file__).parent.joinpath("tests", "data", "get_secret_response.json").open("r") as stream:
srv_response_json = json.load(stream)
else:
srv_response = requests.get(
self.SECRET_SERVER_API + "/secrets/",
headers=headers,
verify=self.SECRET_SERVER_SSL_VERIFY,
)
if srv_response.status_code not in (200, 304):
self._isconnected = False
raise Exception(f"Error retrieving Secret. {srv_response.status_code} {srv_response}")
srv_response_json = srv_response.json()
found = list(filter(lambda x: text in x["name"], srv_response_json["records"]))
if found:
secret_id = str(found[0]["id"])
return self.getSecret(secret_id)
        return srv_response_json
def searchSecret(self, name_or_id, slugs: Optional[List[str]] = None):
"""Search a secret by name or id
Args:
name_or_id (str): Secret Name or ID to search fo
Returns:
dict: Field names and values
"""
try:
secid = int(name_or_id)
resp = self.getSecret(str(secid))
except ValueError:
resp = self.searchSecretResponse(name_or_id)
return self.getFieldItemWithSlug(resp, slugs=slugs)
``` |
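A hedged usage sketch of the `SecretServer` client in dummy mode, which needs no real Thycotic server (the secret ID is invented; the import matches the test file above):

```python
import os
from secread import SecretServer

os.environ["SECRET_SERVER_IS_DUMMY"] = "True"   # use the built-in dummy responses
server = SecretServer()

# A numeric lookup goes straight to getSecret(); in dummy mode this returns canned data
creds = server.searchSecret("12345")
print(creds["username"], creds["url"])          # fields selected by the default slugs
```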
{
"source": "jifranco/astr-119-hw-2",
"score": 3
} |
#### File: jifranco/astr-119-hw-2/functions.py
```python
import numpy as np
import sys
def expo(x):
return np.exp(x)
def show_expo(n):
for i in range(n):
print(expo(float(i)))
def main():
n=10
if(len(sys.argv)>1):
n = int(sys.argv[1])
show_expo(n)
if __name__ == "__main__":
main()
``` |
{
"source": "JifuZhao/Lending-Club-Loan-Analysis",
"score": 3
} |
#### File: JifuZhao/Lending-Club-Loan-Analysis/utils.py
```python
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
import matplotlib.pyplot as plt
import plotly.figure_factory as ff
import plotly.graph_objs as go
from plotly import tools
warnings.simplefilter('ignore')
def plot_numerical(data, feature, figsize=(16, 5)):
""" helper function for visualization using Seaborn """
data = data[~data[feature].isnull()]
grouped = data[[feature, 'target']].groupby(feature)
mean = grouped.mean().reset_index()
hist_kws={'histtype': 'bar', 'edgecolor':'black', 'alpha': 0.2}
warnings.filterwarnings('ignore')
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=figsize)
sns.distplot(data[data['target'] == 0][feature], label='Target: 0',
ax=ax[0], hist_kws=hist_kws)
sns.distplot(data[data['target'] == 1][feature], label='Target: 1',
ax=ax[0], hist_kws=hist_kws)
ax[0].legend()
ax[1].plot(mean[feature], mean['target'], '.:', alpha=0.5)
ax[1].set_xlabel(feature)
ax[1].set_ylabel('Mean')
ax[1].grid(True)
plt.tight_layout()
return fig, ax
def discrete_plot(data, feature, width=800, height=400):
""" function to plot the discrete variable with Plotly """
# make subplots
titles = ('Distribution Plot of ' + feature.capitalize(),
'Default Rate vs. '+ feature.capitalize())
fig = tools.make_subplots(rows=1, cols=2, print_grid=False, subplot_titles=titles)
# fig 1: count distribution for each feature
grouped = data.groupby('target')[feature]
values = grouped.apply(lambda x: x.value_counts(normalize=True)).unstack()
trace0 = go.Bar(x=values.columns, y=values.loc[0], name='Status 0')
trace1 = go.Bar(x=values.columns, y=values.loc[1], name='Status 1')
fig.append_trace(trace0, 1, 1)
fig.append_trace(trace1, 1, 1)
# fig 2: default rate bar plot for each feature
names = list(values.columns)
means = data.groupby(feature)['target'].mean()
stds = data.groupby(feature)['target'].std()
for name, mean, std in zip(names, means[names], stds[names]):
low, high = stats.norm.interval(0.05, loc=mean, scale=std)
er = mean - low
trace = go.Bar(x=[name], y=[mean], error_y=dict(array=[er], visible=True),
name=name, xaxis='x2')
fig.append_trace(trace, 1, 2)
# layout setting
legend = dict(orientation='h', xanchor='auto', y=-0.2)
margin=go.layout.Margin(l=50, r=50, b=50, t=40, pad=4)
fig['layout'].update(xaxis=dict(domain=[0, 0.47]), xaxis2=dict(domain=[0.53, 1]),
yaxis2=dict(anchor='x2'), width=width, height=height,
margin=margin, legend=legend)
fig['layout']['xaxis1'].update(title=feature.capitalize())
fig['layout']['yaxis1'].update(title='Probability Density')
fig['layout']['xaxis2'].update(title=feature.capitalize())
fig['layout']['yaxis2'].update(title='Default Rate')
return fig
def numerical_plot(data, feature, hist_bins=40, scatter_bins=100, log=False, w=1000, h=450):
""" function to plot the numerical variable with Plotly """
# transform into log scale
if log is True:
data = data.copy()
tail = ' (log)'
if np.min(data[feature]) == 0:
data[feature] = np.log(data[feature] + 1)
data[feature] = np.log(data[feature] + 1)
else:
tail = ''
# make subplots
titles = ('Histogram of ' + feature.capitalize() + tail,
'Default Rate vs. ' + feature.capitalize() + tail)
fig = tools.make_subplots(rows=1, cols=2, print_grid=False, subplot_titles=titles)
# fig 1: histogram for different loan status
x0 = data[data['target']==0][feature]
x1 = data[data['target']==1][feature]
# find the minimum and maximum values
start = min(x0.min(), x1.min())
end = max(x0.max(), x1.max())
n_unique = len(data[feature].unique())
if n_unique <= min(end - start + 1, hist_bins):
bin_size = 1
else:
bin_size = (end - start) / hist_bins
# Group data together
hist_data = [x0, x1]
group_labels = ['Status 0', 'Status 1']
# Create distplot
fig1 = ff.create_distplot(hist_data=hist_data, group_labels=group_labels,
bin_size=bin_size, show_rug=False)
displot = fig1['data']
# add histgram into the final figure
fig.append_trace(displot[0], 1, 1)
fig.append_trace(displot[1], 1, 1)
fig.append_trace(displot[2], 1, 1)
fig.append_trace(displot[3], 1, 1)
# fig 2: scatter plot for each feature
cut = pd.cut(data[feature], bins=scatter_bins)
group_median = data[[feature, 'target']].groupby(cut).median()
index = group_median[~group_median[feature].isnull()][feature].values
grouped_mean = data[[feature, 'target']].groupby(cut).mean().fillna(method='pad')
mean = grouped_mean[~group_median[feature].isnull()]['target'].values
grouped_sem = data[[feature, 'target']].groupby(cut).sem().fillna(method='pad')
sem = grouped_sem[~group_median[feature].isnull()]['target'].values
# make figures
lower = go.Scatter(name='Lower Bound', x=index, y=mean - sem, mode='lines',
marker=dict(color="#444"), line=dict(width=0), showlegend=False)
trace = go.Scatter(name='Default Rate', x=index, y=mean, mode='lines',
line=dict(color='rgb(31, 119, 180)', width=1),
fillcolor='rgba(68, 68, 68, 0.3)', fill='tonexty')
upper = go.Scatter(name='Upper Bound', x=index, y=mean + sem, mode='lines',
marker=dict(color="#444"), line=dict(width=0), fill='tonexty',
fillcolor='rgba(68, 68, 68, 0.3)', showlegend=False)
fig.append_trace(lower, 1, 2)
fig.append_trace(trace, 1, 2)
fig.append_trace(upper, 1, 2)
# layout setting
legend = dict(orientation='h', xanchor='auto', y=-0.2)
margin=go.layout.Margin(l=50, r=50, b=50, t=40, pad=4)
fig['layout'].update(xaxis=dict(domain=[0, 0.47]), xaxis2=dict(domain=[0.53, 1]),
yaxis2=dict(anchor='x2'), width=w, height=h,
margin=margin, legend=legend)
fig['layout']['xaxis1'].update(title=feature.capitalize() + tail)
fig['layout']['yaxis1'].update(title='Probability Density')
fig['layout']['xaxis2'].update(title=feature.capitalize() + tail)
fig['layout']['yaxis2'].update(title='Default Rate')
return fig
# def numerical_plot(data, feature, width=800, height=400, bins=50):
# """ function to plot the numerical variable """
# # make subplots
# titles = ('Histogram Plot', 'Default Rate vs. ' + feature.capitalize())
# fig = tools.make_subplots(rows=1, cols=2, print_grid=False, subplot_titles=titles)
#
# # fig 1: histogram for different loan status
# x0 = data[data['target']==0][feature]
# x1 = data[data['target']==1][feature]
#
# # find the minimum and maximum values
# start = min(x0.min(), x1.min())
# end = max(x0.max(), x1.max())
# n_unique = len(data[feature].unique())
# if n_unique <= min(end - start + 1, bins):
# bin_size = 1
# else:
# bin_size = (end - start) / bins
#
# # Group data together
# hist_data = [x0, x1]
# group_labels = ['Status 0', 'Status 1']
#
# # Create distplot
# fig1 = ff.create_distplot(hist_data=hist_data, group_labels=group_labels,
# bin_size=bin_size, show_rug=False)
# displot = fig1['data']
#
# # add histgram into the final figure
# fig.append_trace(displot[0], 1, 1)
# fig.append_trace(displot[1], 1, 1)
# fig.append_trace(displot[2], 1, 1)
# fig.append_trace(displot[3], 1, 1)
#
# # fig 2: scatter plot for each feature
# mean = train.groupby(feature)['target'].mean()
# sem = train.groupby(feature)['target'].sem().fillna(value=0)
# index = mean.index
#
# lower = go.Scatter(x=index, y=mean[index]-sem[index], mode='lines',
# marker=dict(color="#444"), line=dict(width=0),
# showlegend=False)
#
# trace = go.Scatter(name='Default Rate', x=index, y=mean[index],
# line=dict(color='rgb(31, 119, 180)', width=1),
# fillcolor='rgba(68, 68, 68, 0.3)', mode='lines',)
#
# upper = go.Scatter(x=index, y=mean[index]+sem[index], mode='lines',
# marker=dict(color="#444"), line=dict(width=0),
# fill='tonexty', fillcolor='rgba(68, 68, 68, 0.3)',
# showlegend=False)
#
# fig.append_trace(lower, 1, 2)
# fig.append_trace(upper, 1, 2)
# fig.append_trace(trace, 1, 2)
#
# # layout setting
# legend = dict(orientation='h', xanchor='auto', y=-0.2)
# margin=go.layout.Margin(l=50, r=50, b=50, t=40, pad=4)
# fig['layout'].update(xaxis=dict(domain=[0, 0.47]), xaxis2=dict(domain=[0.53, 1]),
# yaxis2=dict(anchor='x2'), width=width, height=height,
# margin=margin, legend=legend)
# fig['layout']['xaxis1'].update(title=feature.capitalize())
# fig['layout']['yaxis1'].update(title='Probability Density')
# fig['layout']['xaxis2'].update(title=feature.capitalize())
# fig['layout']['yaxis2'].update(title='Default Rate')
#
# return fig
#
#
# def categorical_plot(data, feature, width=800, height=400):
# """ function to plot the categorical variable """
# # make subplots
# titles = ('Distribution Plot', 'Default Rate Distribution')
# fig = tools.make_subplots(rows=1, cols=2, print_grid=False, subplot_titles=titles)
#
# # fig 1: count distribution for each feature
# grouped = data.groupby('target')[feature]
# values = grouped.apply(lambda x: x.value_counts(normalize=True)).unstack()
# names = list(values.columns)
# x = ['status 0', 'status 1']
# for name in names:
# trace = go.Bar(x=x, y=list(values[name]), name=name)
# fig.append_trace(trace, 1, 1)
#
# # fig 2: default rate bar plot for each feature
# means = data.groupby(feature)['target'].mean()
# stds = data.groupby(feature)['target'].std()
# for name, mean, std in zip(names, means[names], stds[names]):
# low, high = stats.norm.interval(0.05, loc=mean, scale=std)
# er = mean - low
# trace = go.Bar(x=[name], y=[mean], error_y=dict(array=[er], visible=True),
# name=name, xaxis='x2')
# fig.append_trace(trace, 1, 2)
#
# # layout setting
# legend = dict(orientation='h', xanchor='auto', y=-0.2)
# margin=go.layout.Margin(l=50, r=50, b=50, t=40, pad=4)
# fig['layout'].update(xaxis=dict(domain=[0, 0.47]), xaxis2=dict(domain=[0.53, 1]),
# yaxis2=dict(anchor='x2'), width=width, height=height,
# margin=margin, legend=legend)
# fig['layout']['xaxis1'].update(title='Loan Status')
# fig['layout']['yaxis1'].update(title='Probability Density')
# fig['layout']['xaxis2'].update(title=feature.capitalize())
# fig['layout']['yaxis2'].update(title='Default Rate')
#
# return fig
``` |
{
"source": "Jiganesh/High-On-DSA",
"score": 3
} |
#### File: High-On-DSA/arrays/shift2DGrid.py
```python
class Solution:
# [All Three Accepted]
# Runtime: 160 ms, faster than 96.38% of Python3 online submissions for Shift 2D Grid.
# Memory Usage: 14.4 MB, less than 35.10% of Python3 online submissions for Shift 2D Grid.
# TC : O(M*N)
# SC : O(M*N)
def shiftGrid(self, grid, k: int) :
result = []
for i in grid:
for j in i:
result.append(j)
k=k%(len(grid)*len(grid[0]))
result = result [-k:]+ result [:-k]
pointer=0
for i in range (len(grid)):
for j in range(len(grid[i])):
grid[i][j]= result[pointer]
pointer+=1
return grid
# TC : O(M*N)
# SC : O(M*N)
def shiftGrid(self, grid, k):
m, n = len(grid), len(grid[0])
start = m * n - k % (m * n)
ans = []
for i in range(start, m * n + start):
j = i % (m * n)
r, c = j // n, j % n
if not (j - start) % n:
ans.append([])
ans[-1].append(grid[r][c])
return ans
# TC : O(M*N)
# SC : O(1)
def shiftGrid(self, grid,k) :
row = len(grid)
column = len(grid[0])
totalLength = (row*column)-1 # Pointer on Last Element
k%=(row*column)
def reverse (start , end):
while start < end :
startrow, startcol = (start//column)%row , start%column
endrow, endcol = (end//column)%row, end%column
grid[startrow][startcol] , grid[endrow][endcol] = grid[endrow][endcol], grid[startrow][startcol]
start+=1
end -=1
return grid
reverse(0, totalLength-k)
reverse(totalLength-k+1 , totalLength)
reverse(0, totalLength)
return grid
```
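The third solution above rotates the flattened grid in place with three reversals: reverse the leading `n-k` elements, reverse the trailing `k` elements, then reverse the whole thing. A hedged, self-contained sketch of the same trick on a flat Python list (the 3x3 example is illustrative):

```python
def rotate_right(a, k):
    """Rotate list a right by k positions using the three-reversal trick, in place."""
    k %= len(a)
    def rev(lo, hi):
        while lo < hi:
            a[lo], a[hi] = a[hi], a[lo]
            lo += 1
            hi -= 1
    rev(0, len(a) - 1 - k)        # reverse the part that ends up at the back
    rev(len(a) - k, len(a) - 1)   # reverse the part that ends up at the front
    rev(0, len(a) - 1)            # reverse everything to put both parts in order
    return a

flat = [1, 2, 3, 4, 5, 6, 7, 8, 9]   # a 3x3 grid read row by row
print(rotate_right(flat, 1))         # [9, 1, 2, 3, 4, 5, 6, 7, 8] -> [[9,1,2],[3,4,5],[6,7,8]]
```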
#### File: High-On-DSA/design/designAnATMMachine.py
```python
class ATM:
# Runtime: 689 ms, faster than 83.33% of Python3 online submissions for Design an ATM Machine.
# Memory Usage: 17.5 MB, less than 100.00% of Python3 online submissions for Design an ATM Machine.
def __init__(self):
self.denominations= [20, 50, 100,200,500]
self.noOfDenominations= [0, 0, 0, 0, 0]
def deposit(self, banknotesCount):
for i in range (len(banknotesCount)):
self.noOfDenominations[i]+= banknotesCount[i]
def withdraw(self, amount: int) :
withdrawn =[0, 0,0,0,0]
endPointer = 4
initialState = self.noOfDenominations
while endPointer>=0:
if self.noOfDenominations[endPointer]:
maxNotesAvailabe = min(self.noOfDenominations[endPointer], amount//(self.denominations[endPointer]))
amount-= maxNotesAvailabe * self.denominations[endPointer]
withdrawn[endPointer]= maxNotesAvailabe
endPointer-=1
if amount ==0:
for i in range (len(withdrawn)):
self.noOfDenominations[i]-=withdrawn[i]
return withdrawn
else:
self.noOfDenominations = initialState
return [-1]
```
#### File: High-On-DSA/design/encodeAndDecodeTinyURL.py
```python
class Codec:
# Runtime: 36 ms, faster than 84.95% of Python3 online submissions for Encode and Decode TinyURL.
# Memory Usage: 13.9 MB, less than 69.91% of Python3 online submissions for Encode and Decode TinyURL.
def encode(self, longUrl: str) -> str:
"""Encodes a URL to a shortened URL.
"""
return longUrl
def decode(self, shortUrl: str) -> str:
"""Decodes a shortened URL to its original URL.
"""
return shortUrl
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.decode(codec.encode(url))
```
#### File: High-On-DSA/design/implementStackUsingQueues.py
```python
from collections import *
# https://leetcode.com/problems/implement-stack-using-queues/
class MyStack:
# Runtime: 44 ms, faster than 41.12% of Python3 online submissions for Implement Stack using Queues.
# Memory Usage: 13.9 MB, less than 76.44% of Python3 online submissions for Implement Stack using Queues.
def __init__(self):
self.q = deque ()
def push(self, x: int) -> None:
self.q.append(x)
def pop(self) -> int:
return self.q.pop()
def top(self) -> int:
return self.q[-1]
def empty(self) -> bool:
return len(self.q)==0
# Your MyStack object will be instantiated and called as such:
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
```
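The MyStack above relies on `deque.pop()`, which already behaves like a stack; the classic single-queue approach instead rotates the queue after each push so the newest element sits at the front. A hedged sketch of that variant (illustrative, not the submitted solution):

```python
from collections import deque

class MyStackOneQueue:
    """Stack on a single FIFO queue: O(n) push, O(1) pop/top."""
    def __init__(self):
        self.q = deque()

    def push(self, x: int) -> None:
        self.q.append(x)
        # rotate so the newly pushed element moves to the front of the queue
        for _ in range(len(self.q) - 1):
            self.q.append(self.q.popleft())

    def pop(self) -> int:
        return self.q.popleft()

    def top(self) -> int:
        return self.q[0]

    def empty(self) -> bool:
        return not self.q

s = MyStackOneQueue()
s.push(1)
s.push(2)
print(s.top(), s.pop(), s.empty())  # 2 2 False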
#### File: High-On-DSA/dynamicProgramming/decodeWays.py
```python
class Solution(object):
# Submitted by Jiganesh
# TC : O(N)
# SC : O(N)
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
dp = { "" : 1 }
def helper(s):
if s in dp :
return dp[s]
first, second = 0,0
if 1<=int(s[0])<=9:
first = helper(s[1:])
if len(s)>=2 and( s[0]=="1" or( s[0]=="2" and 0<=int(s[1])<=6)):
second = helper(s[2:])
dp[s]= first +second
return first+second
return helper(s)
print(Solution().numDecodings("22233"))
```
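The memoized solution above slices the string at every call; an equivalent bottom-up version walks the string once and keeps only two rolling counts. A hedged sketch (the variable names are mine):

```python
def num_decodings_bottom_up(s: str) -> int:
    if not s or s[0] == "0":
        return 0
    prev2, prev1 = 1, 1        # ways to decode the empty prefix and the one-character prefix
    for i in range(1, len(s)):
        cur = 0
        if s[i] != "0":                                            # single-digit decode
            cur += prev1
        if s[i - 1] == "1" or (s[i - 1] == "2" and s[i] <= "6"):   # two-digit decode 10-26
            cur += prev2
        prev2, prev1 = prev1, cur
    return prev1

print(num_decodings_bottom_up("22233"))  # 5, matching the memoized version above
```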
#### File: High-On-DSA/prefixSum/minimumAverageDifference.py
```python
import math
import operator
from itertools import accumulate
class Solution:
# Runtime: 1148 ms, faster than 25.00% of Python3 online submissions for Minimum Average Difference.
# Memory Usage: 25.3 MB, less than 25.00% of Python3 online submissions for Minimum Average Difference.
def minimumAverageDifference(self, nums) -> int:
prefixSum = list(accumulate(nums, operator.add))
minindex = [-1, float("inf")]
for index , i in enumerate(prefixSum):
leftside = i
numbersinleftside = index+1
rightside = prefixSum[-1]-i
numbersinrightside = len(prefixSum)-numbersinleftside if numbersinleftside < len(prefixSum) else 1
l = math.floor(leftside/numbersinleftside)
r = math.floor(rightside/numbersinrightside)
if minindex[1] > abs(l-r):
minindex = [index, abs(l-r)]
return minindex[0]
```
#### File: High-On-DSA/prefixSum/minimumSizeSubarraySum.py
```python
class Solution:
# TLE Brute Force
def minSubArrayLen(self, target, nums) -> int:
summation = 0
array =[]
for i in nums:
summation+=i
array.append(summation)
minimum =float('inf')
for i in range (len(array)):
for j in range (i+1,len(array)):
if array[j]-array[i] >=target:
minimum = min(j-i, minimum)
return minimum if minimum != float('inf') else 0
def minSubArrayLen(self, target, nums) -> int:
leftPointer = 0
rightPointer = 0
summation = 0
minimumSubarray = float('inf')
for i in nums:
summation+=i
rightPointer+=1
while summation >= target :
minimumSubarray = min(rightPointer - leftPointer, minimumSubarray)
summation-= nums[leftPointer]
leftPointer+=1
return minimumSubarray if minimumSubarray != float('inf') else 0
# Little Efficient that above algorithm as we are using while loop with pointers instead of for loop
# Runtime: 87 ms, faster than 71.41% of Python3 online submissions for Minimum Size Subarray Sum.
# Memory Usage: 16.8 MB, less than 52.83% of Python3 online submissions for Minimum Size Subarray Sum.
def minSubArrayLen(self, target, nums) -> int:
leftPointer = 0
rightPointer = 0
summation = 0
minimumSubarray = float('inf')
while rightPointer< len(nums):
summation+=nums[rightPointer]
rightPointer+=1
while summation >= target :
minimumSubarray = min(rightPointer - leftPointer, minimumSubarray)
summation-= nums[leftPointer]
leftPointer+=1
return minimumSubarray if minimumSubarray != float('inf') else 0
```
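The final version above is the standard two-pointer sliding window: grow the window on the right, then shrink it from the left while the sum still meets the target. A compact standalone restatement of the same idea (the input is a made-up illustrative example):

```python
def min_subarray_len(target, nums):
    left = window_sum = 0
    best = float("inf")
    for right, value in enumerate(nums):
        window_sum += value                        # grow the window to the right
        while window_sum >= target:                # shrink from the left while still valid
            best = min(best, right - left + 1)
            window_sum -= nums[left]
            left += 1
    return 0 if best == float("inf") else best

print(min_subarray_len(7, [2, 3, 1, 2, 4, 3]))  # 2 -> the window [4, 3]
```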
#### File: High-On-DSA/prefixSum/runningSumOf1DArray.py
```python
from itertools import accumulate
import operator
class Solution:
# Runtime: 40 ms, faster than 89.27% of Python3 online submissions for Running Sum of 1d Array.
# Memory Usage: 14.2 MB, less than 29.22% of Python3 online submissions for Running Sum of 1d Array.
def runningSum(self, nums) :
return list(accumulate(nums, operator.add))
```
#### File: High-On-DSA/trees/convertBSTToGreaterTree.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
# Iterative Approach
# Runtime: 69 ms, faster than 99.27% of Python3 online submissions for Convert BST to Greater Tree.
# Memory Usage: 16.7 MB, less than 53.49% of Python3 online submissions for Convert BST to Greater Tree.
def convertBST(self, root):
lst = []
curr = root
sum = 0
while lst or curr:
while curr:
lst.append(curr)
curr = curr.right
curr = lst.pop()
sum += curr.val
curr.val = sum
curr = curr.left
return root
# Recursive Approach
# Runtime: 82 ms, faster than 89.47% of Python3 online submissions for Convert BST to Greater Tree.
# Memory Usage: 16.7 MB, less than 77.23% of Python3 online submissions for Convert BST to Greater Tree.
def convertBST(self, root):
def helper(root, array):
if root:
helper(root.right, array)
root.val += array[0]
array[0] = root.val
helper(root.left, array)
return root
array = [0]
return helper(root, array)
# Recursive Another Approach
sum=0
def convertBST(self, root):
if (root==None): return None
self.convertBST(root.right) #first get to the rightmost element
self.sum+=root.val #then add the current nodes value to sum
root.val=self.sum #then update current node with sum
self.convertBST(root.left) #travese to check left nodes
return root
``` |
{
"source": "jigangc/rrunner",
"score": 2
} |
#### File: rrunner/common/handle_api_auto.py
```python
import requests
import os
import openpyxl
from rrunner.common.handle_config import config
from rrunner.common.handle_path import DATA_DIR, CASE_DIR
def getByPath(path, obj):
paths = path.split(".")
for path in paths:
obj = obj.get(path, None)
if obj == None:
break
return obj
def dic2String(obj):
rs = ['{']
for kv in obj.items():
rs.append('\"' + kv[0] + "\":\"" + str(kv[1]) + '\"')
rs.append(',')
if len(rs) > 2:
rs.pop()
rs.append('}')
return ''.join(rs)
def parseBody(parItem, body, raw):
if parItem.get('schema', None) != None:
refPath = getByPath('schema.$ref', parItem)
if refPath == None:
            # array: resolve the reference from schema.items instead
refPath = getByPath('schema.items.$ref', parItem)
if refPath != None:
refPath = refPath.replace('#/definitions/', '')
refData = getByPath('definitions.' + refPath + '.properties', raw)
if refData != None:
for ri in refData.items():
body[ri[0]] = (0 if ri[1].get('type', None) == 'integer'
else "")
elif parItem.get('description', None) != None:
body['_parms'] = parItem['description']
else:
body[parItem['name']] = ''
else:
body[parItem['name']] = (0 if parItem.get('type', None) == 'integer'
else "")
def writeRow(func, ws, i):
i = str(i)
ws['A' + i] = func['case_id']
ws['B' + i] = func['title']
ws['C' + i] = func['interface']
ws['D' + i] = func['content-type']
ws['E' + i] = func['method']
ws['F' + i] = func['url']
ws['G' + i] = func['data']
ws['H' + i] = func['expected']
ws['I' + i] = func['check_sql']
ws['J' + i] = func['result']
ws['K' + i] = func['tag']
def writeCaseClass(cName):
caseName = 'test_' + cName + '_controller.py'
dataName = 'test_' + cName + '_controller.xlsx'
isExist = os.path.exists(os.path.join(CASE_DIR + "\InnerApi", caseName))
if isExist:
return
f = open(os.path.join(CASE_DIR + "\InnerApi", caseName), 'w')
f.write("import os\n")
f.write("import allure\n")
f.write("import pytest\n")
f.write("from common.handle_excel import Excel\n")
f.write("from common.handle_path import DATA_DIR\n")
f.write("from common.handle_config import config\n")
f.write("from common.requtest_assert import RequestsAssert\n")
f.write("class Test" + cName + ":\n")
f.write(' excel = Excel(os.path.join(DATA_DIR, "{}"), "Sheet")\n'.format(dataName))
f.write(" test_data = excel.read_excel()\n")
f.write(' module = config.get("test_data", "module")\n')
f.write(' if module == "0":\n')
f.write(' for i in range(0, len(test_data) - 1):\n')
f.write(' if None == test_data[i]["tag"]:\n')
f.write(' del (test_data[i])\n')
f.write(' @allure.feature("{}")\n'.format(cName))
f.write(" @pytest.mark.parametrize('item', test_data)\n")
f.write(' def test_' + cName + '(self, item, get_token):\n')
f.write(" headers = get_token\n")
f.write(" res = RequestsAssert.apiRequest(item, headers)\n")
f.write(" write = self.excel.write_excel\n")
f.write(" RequestsAssert.apiAssert(res, item, write)\n")
def writeCase(cName, funcs):
caseName = 'test_' + cName + '_controller.xlsx'
isExist = os.path.exists(os.path.join(DATA_DIR, caseName))
if isExist:
return
wb = openpyxl.Workbook()
ws = wb.active
i = 1
for func in funcs:
writeRow(func, ws, i)
i += 1
wb.save(os.path.join(DATA_DIR, caseName))
def main(catName, rules):
rs = requests.get(config.get("env", "swagger_url"))
raw = rs.json()
paths = getByPath("paths", raw)
funcs = []
lastCName = None
i = 1
keys = paths.keys()
# keys.sort()
keys = sorted(keys)
for pKey in keys:
path = pKey
value = paths[pKey]
cName = path.split('/')[1]
if catName != '*' and cName != catName:
continue
if lastCName != cName and lastCName != None:
writeCase(lastCName, funcs)
writeCaseClass(lastCName)
i = 1
funcs = []
lastCName = cName
method = 'post' if value.get('post', None) != None else 'get'
value = value[method]
params = getByPath("parameters", value)
desc = getByPath("summary", value)
body = {}
query = {}
data = {}
for par in params:
if par['in'] == 'body':
parseBody(par, body, raw)
elif par['in'] == 'query':
query[par['name']] = ''
data = {'query': query, 'body': body}
# if len(body) > 0 and len(query) > 0:
# data = {query: query, body: body}
# else:
# data = body if len(body) > 0 else query
if i == 1:
funcs.append({
'case_id': 'case_id',
'title': 'title',
'content-type': 'content-type',
'interface': 'interface',
'url': 'url',
'method': 'method',
'data': 'data',
'expected': 'expected',
'check_sql': 'check_sql',
'result': 'result',
'tag': 'tag'
})
item = {
'case_id': str(i),
'title': desc,
'content-type': 'union',
'interface': path,
'url': "/smartfactory" + path,
'method': method,
'data': '',
'expected': '{\"innerCode\":"200"}',
'check_sql': '',
'result': '',
'tag': ''
}
if len(body) > 0:
item['content-type'] = 'data'
if len(body) == 1 and body.get('_parms', None) != None:
item['data'] = body['_parms']
else:
item['data'] = dic2String(body)
else:
item['content-type'] = 'params'
item['data'] = dic2String(query)
if method == "post":
item['content-type'] = 'json'
else:
item['content-type'] = 'params'
funcs.append(item)
i += 1
writeCase(lastCName, funcs)
writeCaseClass(lastCName)
def parseArgs():
args = {
'int': {
'min': 0,
'max': 100
},
'string': {
'min': 0,
'max': 100,
'whiteSpace': True,
'required': True
}
}
return args
main('*', parseArgs())
``` |
{
"source": "jigangkim/domain_randomization",
"score": 2
} |
#### File: dr/ppo/train.py
```python
import numpy as np
import torch
import torch.nn.functional as F
from dr.ppo.utils import Dataset, change_lr
def update_params(m_b, pol, val, optims, clip_param):
keys = ('obs', 'acs', 'vtargs', 'atargs', 'pold')
obs, acs, vtargs, atargs, pold = (torch.from_numpy(m_b[i]).float() for i in keys)
vtargs = vtargs.view(-1, 1)
atargs = atargs.view(-1, 1)
# Calculate policy surrogate objective
pnew = pol.prob(obs, acs)
ratio = pnew / pold
surr1 = ratio * atargs
surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * atargs
pol_surr, _ = torch.min(torch.cat((surr1, surr2), dim=1), dim=1)
pol_surr = - torch.sum(pol_surr) / obs.size()[0]
# Calculate value function loss
val_loss = F.mse_loss(val(obs), vtargs)
optims['pol_optim'].zero_grad()
optims['val_optim'].zero_grad()
total_loss = pol_surr + val_loss
total_loss.backward(retain_graph=True)
optims['pol_optim'].step()
optims['val_optim'].step()
@torch.no_grad()
def evaluate_policy(pol, eval_envs):
num_envs = len(eval_envs)
for i, env in enumerate(eval_envs):
env.seed(i)
done = np.array([False] * num_envs)
avg_reward = np.array([0.] * num_envs, dtype=np.float32)
obs = np.stack([env.reset() for env in eval_envs])
while not all(done):
t_obs = torch.from_numpy(obs).float()
_, mean_acs = pol(t_obs)
for i, (env, action) in enumerate(zip(eval_envs, mean_acs)):
if not done[i]:
obs[i], r, d, _ = env.step(action)
avg_reward[i] += r
done[i] = d
avg_reward = np.mean(avg_reward)
return avg_reward
def add_vtarg_and_adv(seg, lam, gamma):
"""
Compute target value using TD(lambda) estimator, and advantage with GAE(lambda)
"""
news = np.append(seg["news"],
0) # last element is only used for last vtarg, but we already zeroed it if last new = 1
vpreds = np.append(seg["vpreds"], seg["nextvpred"])
T = len(seg["rews"])
seg["advs"] = gaelam = np.empty(T, 'float32')
rews = seg["rews"]
lastgaelam = 0
for t in reversed(range(T)):
nonterminal = 1 - news[t + 1]
delta = rews[t] + gamma * vpreds[t + 1] * nonterminal - vpreds[t]
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
seg["tdlamrets"] = seg["advs"] + seg["vpreds"]
def one_train_iter(pol, val, optims,
iter_i, eps_rets_buff, eps_rets_mean_buff, seg_gen,
state_running_m_std, train_params, eval_envs, eval_perfs,
eval_freq=5):
# Extract params
ts_per_batch = train_params['ts_per_batch']
num_timesteps = train_params['num_timesteps']
optim_stepsize = train_params['optim_stepsize']
lam = train_params['lam']
gamma = train_params['gamma']
optim_epoch = train_params['optim_epoch']
optim_batch_size = train_params['optim_batch_size']
clip_param = train_params['clip_param']
# Anneal the learning rate
num_ts_so_far = iter_i * ts_per_batch
lr_mult = max(1.0 - float(num_ts_so_far) / num_timesteps, 0)
change_lr(optims['pol_optim'], optim_stepsize * lr_mult)
change_lr(optims['val_optim'], optim_stepsize * lr_mult)
# Obtain training batch
seg = seg_gen.__next__()
# Update running mean and std of states
state_running_m_std.update(seg['obs'])
eps_rets_buff.extend(seg['ep_rets'])
eps_rets_mean_buff.append((num_ts_so_far, np.mean(eps_rets_buff)))
add_vtarg_and_adv(seg, lam, gamma)
seg['advs'] = (seg['advs'] - seg['advs'].mean()) / seg['advs'].std()
pold = pol.prob(torch.from_numpy(seg['obs']).float(),
torch.from_numpy(seg['acs']).float()).data.numpy()
batch = Dataset(dict(obs=seg['obs'], acs=seg['acs'], atargs=seg['advs'], vtargs=seg['tdlamrets'], pold=pold))
for epoch_i in range(optim_epoch):
for m_b in batch.iterate_once(optim_batch_size):
update_params(m_b, pol, val, optims, clip_param)
if iter_i % eval_freq == 0:
eval_pref = evaluate_policy(pol, eval_envs)
eval_perfs.append(eval_pref)
``` |
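`add_vtarg_and_adv` runs the GAE(lambda) recursion backwards through a segment: delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and A_t = delta_t + gamma * lambda * A_{t+1}, with both terms zeroed at episode boundaries; the TD(lambda) targets are then A_t + V(s_t). A standalone NumPy restatement with made-up numbers, purely to show the backward pass:

```python
import numpy as np

def gae(rews, vpreds, next_vpred, news, gamma=0.99, lam=0.95):
    """Standalone restatement of the recursion in add_vtarg_and_adv above."""
    news = np.append(news, 0)
    vpreds = np.append(vpreds, next_vpred)
    T = len(rews)
    advs = np.empty(T, dtype=np.float32)
    lastgaelam = 0.0
    for t in reversed(range(T)):
        nonterminal = 1 - news[t + 1]
        delta = rews[t] + gamma * vpreds[t + 1] * nonterminal - vpreds[t]
        advs[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
    return advs, advs + vpreds[:-1]

advs, tdlamrets = gae(rews=np.ones(3), vpreds=np.full(3, 0.5), next_vpred=0.5, news=np.zeros(3))
print(advs, tdlamrets)  # invented numbers, only to illustrate the backward recursion
```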
{
"source": "jigangkim/nvidia-gpu-scheduler",
"score": 2
} |
#### File: nvidia-gpu-scheduler/nvidia_gpu_scheduler/utils.py
```python
import datetime
import errno
import logging
import numpy as np
import os
import pickle
import pwd
import py3nvml
import random
import string
import sys
import time
from tqdm import tqdm
import traceback
import warnings
# https://stackoverflow.com/questions/6728236/exception-thrown-in-multiprocessing-pool-not-detected
class CatchExceptions(object):
'''
Wrapper for callable enabling child process exception/traceback catching
'''
def __init__(self, callable):
self.__callable = callable
def __call__(self, *args, **kwargs):
try:
result = self.__callable(*args, **kwargs)
except Exception as e:
print(traceback.format_exc())
raise
return result
class ROSRate(object):
'''
http://docs.ros.org/diamondback/api/rostime/html/rate_8cpp_source.html
'''
def __init__(self, frequency):
assert frequency > 0, 'Frequency must be greated than zero!'
self._freq = frequency
self._start = time.time()
self._actual_cycle_time = 1/self._freq
def reset(self):
self._start = time.time()
def sleep(self):
expected_end = self._start + 1/self._freq
actual_end = time.time()
if actual_end < self._start: # detect backward jumps in time
expected_end = actual_end + 1/self._freq
# calculate sleep time
sleep_duration = expected_end - actual_end
# set the actual amount of time the loop took in case the user wants to know
self._actual_cycle_time = actual_end - self._start
# reset start time
self._start = expected_end
if sleep_duration <= 0:
# if we've jumped forward in time, or the loop has taken more than a full extra cycle, reset our cycle
if actual_end > expected_end + 1/self._freq:
self._start = actual_end
return True
return time.sleep(sleep_duration)
def mute_terminal():
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
def prompt_yes_or_no(query):
while True:
response = input(query + ' [Y/n] ').lower()
if response in {'y', 'yes'}:
return True
elif response in {'n', 'no'}:
return False
else:
print('Invalid response!\n')
def log_tqdm(tqdm_obj, config_fname, remove=False):
if remove:
try: os.remove(os.path.join('/tmp', config_fname + '.tqdm'))
except OSError: pass
else:
d = tqdm_obj.format_dict
tqdm_stat = ()
tqdm_stat += (os.environ.get('CUDA_VISIBLE_DEVICES'),)
tqdm_stat += (os.getpid(),)
tqdm_stat += (int(d['n']/d['total']*100),)
tqdm_stat += (d['n'],)
tqdm_stat += (d['total'],)
tqdm_stat += (str(datetime.timedelta(seconds=int(d['elapsed']))),)
try: tqdm_stat += (str(datetime.timedelta(seconds=int((d['total'] - d['n'])/d['rate']))),)
except: tqdm_stat += ('?',)
try: tqdm_stat += (round(d['rate'],2),)
except: tqdm_stat += ('?',)
try:
pickle.dump(tqdm_stat,
open(os.path.join('/tmp', config_fname + '.tqdm'), 'wb')
)
except OSError as e:
if e.errno == errno.ENOENT: print('log_tqdm: No such file of directory')
elif e.errno == errno.ENOSPC: print('log_tqdm: No space left on device')
def get_random_string(length):
letters = string.ascii_letters
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def get_num_procs(allocated_gpus=[], username='all users', version='v1'):
""" Gets the number of processes running on each gpu
Returns
-------
num_procs : list(int)
Number of processes running on each gpu
Note
----
If function can't query the driver will return an empty list rather than raise an
Exception.
Note
----
If function can't get the info from the gpu will return -1 in that gpu's place
"""
if username != 'all users': pwd.getpwnam(username)
# Try connect with NVIDIA drivers
logger = logging.getLogger(__name__)
try:
py3nvml.py3nvml.nvmlInit()
except:
str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
warnings.warn(str_, RuntimeWarning)
logger.warn(str_)
return [], []
num_gpus = py3nvml.py3nvml.nvmlDeviceGetCount()
if len(allocated_gpus) == 0:
allocated_gpus = list(range(num_gpus))
else:
assert num_gpus > max(allocated_gpus)
gpu_procs = [-1]*len(allocated_gpus)
gpu_procs_user = [-1]*len(allocated_gpus)
gpu_procs_pid = [[]]*len(allocated_gpus)
for i, gpuid in enumerate(allocated_gpus):
try:
h = py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(gpuid)
except:
continue
procs = py3nvml.utils.try_get_info(py3nvml.py3nvml.nvmlDeviceGetComputeRunningProcesses, h,
['something'])
gpu_procs[i] = len(procs)
procs_user = []
procs_pid = []
for proc in procs:
proc_uid = os.stat('/proc/%d' % (proc.pid)).st_uid
if pwd.getpwuid(proc_uid).pw_name == username or username == 'all users':
procs_user.append(proc)
procs_pid.append(proc.pid)
gpu_procs_user[i] = len(procs_user)
gpu_procs_pid[i] = procs_pid
py3nvml.py3nvml.nvmlShutdown()
if version == 'v1':
return gpu_procs, gpu_procs_user
elif version == 'v2':
return gpu_procs, gpu_procs_user, gpu_procs_pid
def get_gpu_utilization(allocated_gpus=[]):
'''
Gets the utilization rates of each gpu
'''
# Try connect with NVIDIA drivers
logger = logging.getLogger(__name__)
try:
py3nvml.py3nvml.nvmlInit()
except:
str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
warnings.warn(str_, RuntimeWarning)
logger.warn(str_)
return []
num_gpus = py3nvml.py3nvml.nvmlDeviceGetCount()
if len(allocated_gpus) == 0:
allocated_gpus = list(range(num_gpus))
else:
assert num_gpus > max(allocated_gpus)
gpu_rates = [-1]*len(allocated_gpus)
for i, gpuid in enumerate(allocated_gpus):
try:
h = py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(gpuid)
except:
continue
rate = py3nvml.utils.try_get_info(py3nvml.py3nvml.nvmlDeviceGetUtilizationRates, h,
['something'])
gpu_rates[i] = rate.gpu
py3nvml.py3nvml.nvmlShutdown()
return gpu_rates
def get_gpumem_utilization(allocated_gpus=[], username='all users', version='v1'):
'''
Gets the memory usage of each gpu
'''
if username != 'all users': pwd.getpwnam(username)
# Try connect with NVIDIA drivers
logger = logging.getLogger(__name__)
try:
py3nvml.py3nvml.nvmlInit()
except:
str_ = """Couldn't connect to nvml drivers. Check they are installed correctly."""
warnings.warn(str_, RuntimeWarning)
logger.warn(str_)
return []
num_gpus = py3nvml.py3nvml.nvmlDeviceGetCount()
if len(allocated_gpus) == 0:
allocated_gpus = list(range(num_gpus))
else:
assert num_gpus > max(allocated_gpus)
mem_rates = [-1]*len(allocated_gpus)
mem_rates_user = [-1]*len(allocated_gpus)
mem_rates_pid = [{}]*len(allocated_gpus)
for i, gpuid in enumerate(allocated_gpus):
try:
h = py3nvml.py3nvml.nvmlDeviceGetHandleByIndex(gpuid)
except:
continue
info = py3nvml.utils.try_get_info(py3nvml.py3nvml.nvmlDeviceGetMemoryInfo, h,
['something'])
procs = py3nvml.utils.try_get_info(py3nvml.py3nvml.nvmlDeviceGetComputeRunningProcesses, h,
['something'])
mem_rates[i] = int(np.ceil(100*info.used/info.total))
mem_user = []
mem_pid = {}
for proc in procs:
proc_uid = os.stat('/proc/%d' % (proc.pid)).st_uid
if pwd.getpwuid(proc_uid).pw_name == username or username == 'all users':
mem_user.append(proc.usedGpuMemory)
mem_pid[proc.pid] = int(np.ceil(100*proc.usedGpuMemory/info.total))
mem_rates_user[i] = int(np.ceil(100*sum(mem_user)/info.total))
mem_rates_pid[i] = mem_pid
py3nvml.py3nvml.nvmlShutdown()
if version == 'v1':
return mem_rates
elif version == 'v2':
return mem_rates, mem_rates_user, mem_rates_pid
if __name__ == "__main__":
stime = time.time()
print(get_num_procs(version='v2', username='jgkim-larr'))
print(get_gpu_utilization())
print(get_gpumem_utilization(version='v2', username='jgkim-larr'))
ftime = time.time()
print(ftime - stime, 'seconds')
```
#### File: nvidia-gpu-scheduler/nvidia_gpu_scheduler/worker.py
```python
from abc import ABC, abstractmethod
from collections import OrderedDict
import copy
import datetime
import dateutil.tz
import getpass
import IPython
import json
from math import ceil, floor
import numpy as np
import multiprocessing
# multiprocessing.set_start_method('spawn', True) # hacky workaround for ptvsd (python debugger for vscode)
from multiprocessing.managers import SyncManager
import os
import pickle
import py3nvml
import queue
import signal
import time
from tqdm import tqdm
from types import SimpleNamespace
from nvidia_gpu_scheduler.utils import get_num_procs, get_gpu_utilization, get_gpumem_utilization
from nvidia_gpu_scheduler.utils import prompt_yes_or_no, mute_terminal as mute, ROSRate, get_random_string
class NVGPUWorker(SyncManager, ABC):
def __init__(self, ip, port, authkey, name=None):
# prevent SIGINT signal from affecting the manager
signal.signal(signal.SIGINT, self._signal_handling)
self.default_handler = signal.getsignal(signal.SIGINT)
self.ip = ip
self.port = port
self.name = 'worker_%s'%(get_random_string(10)) if name is None else name
super(NVGPUWorker, self).__init__(address=(self.ip, self.port), authkey=str.encode(authkey))
print('Configured NVGPUWorker')
print('Resource limits set to default profile:')
self.set_limits()
self.rate = ROSRate(1)
def connect(self, *args, **kwargs):
super(NVGPUWorker, self).connect(*args, **kwargs)
print('NVGPUWorker connected to %s:%s'%(self.ip,self.port))
def set_limits(
self,
available_gpus=[],
gpu_utilization_limit=[],
gpu_job_limit=[],
utilization_margin=0,
max_gpu_mem_usage=50,
time_between_jobs=0,
subprocess_verbose=False,
apply_limits=['user', 'worker'][0]
):
self.limits = SimpleNamespace()
self.limits.available_gpus = available_gpus
self.limits.gpu_utilization_limit = gpu_utilization_limit
self.limits.gpu_job_limit = gpu_job_limit
self.limits.utilization_margin = utilization_margin
self.limits.max_gpu_mem_usage = max_gpu_mem_usage
self.limits.time_between_jobs = time_between_jobs
self.limits.subprocess_verbose = subprocess_verbose
self.limits.apply_limits = apply_limits
print('worker limits set to %s'%(self.limits))
def update_limits(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self.limits, key):
setattr(self.limits, key, value)
print('worker limits updated to %s'%(self.limits))
def view_limits(self):
print('self.limits = %s'%(self.limits))
def run(self):
# Access shared queues
shared_pending_job_q = self.get_pending_job_q()
shared_worker_status_q = self.get_worker_status_q()
# Worker state
self.worker_resume = True
self.worker_terminate = False
procs = {}
running = OrderedDict()
done = OrderedDict()
failed = OrderedDict()
last_job_time = -float('inf')
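        # Smoothing factor for the exponential moving averages of GPU utilization below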
alpha = np.exp(-3/self.limits.time_between_jobs)
total_gpu_utilization_filt = {gpu_id: 0.0 for gpu_id in self.limits.available_gpus}
user_gpu_utilization_filt = {gpu_id: 0.0 for gpu_id in self.limits.available_gpus}
worker_gpu_utilization_filt = {gpu_id: 0.0 for gpu_id in self.limits.available_gpus}
num_pending = shared_pending_job_q.qsize()
while num_pending + len(running):
curr_user = getpass.getuser()
list_of_gpus = self.limits.available_gpus
max_utilization = self.limits.gpu_utilization_limit
max_jobs_per_gpu = self.limits.gpu_job_limit
# 1. update candidate GPU
total_compute_procs, user_compute_procs, pid_compute_procs = \
get_num_procs(allocated_gpus=list_of_gpus, username=curr_user, version='v2')
worker_compute_procs = copy.deepcopy(user_compute_procs)
total_gpu_utilization = get_gpu_utilization(allocated_gpus=list_of_gpus)
user_gpu_utilization = [ceil(x/(y+1e-12)*z) for x, y, z in zip(user_compute_procs, total_compute_procs, total_gpu_utilization)]
total_gpumem_utilization, user_gpumem_utilization, pid_gpumem_utilization = \
get_gpumem_utilization(allocated_gpus=list_of_gpus, username=curr_user, version='v2')
total_gpu_utilization_filt = [(1 - alpha)*x + alpha*X for x, X in zip(total_gpu_utilization, total_gpu_utilization_filt)]
user_gpu_utilization_filt = [(1 - alpha)*x + alpha*X for x, X in zip(user_gpu_utilization, user_gpu_utilization_filt)]
cand_gpu, cand_gpu_util, cand_gpumem_util = [], [], []
for i, gpuid, in enumerate(list_of_gpus):
if gpuid < 0: # CPU mode
all_pid_compute_procs = [item for sublist in pid_compute_procs for item in sublist]
worker_compute_procs[i] = sum([running[key].pid not in all_pid_compute_procs for key in running])
user_compute_procs[i] = worker_compute_procs[i]
else:
worker_compute_procs[i] = sum([running[key].pid in pid_compute_procs[i] for key in running])
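                # A GPU is a candidate only if total utilization, per-user/worker
                # process count and memory usage are all within the configured limits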
tot_util_cond = total_gpu_utilization_filt[i] <= (100-self.limits.utilization_margin)
tot_memutil_cond = total_gpumem_utilization[i] <= self.limits.max_gpu_mem_usage # (1 - gpu_fraction)*100
user_util_cond = user_gpu_utilization_filt[i] < floor(max_utilization[i]*(100-self.limits.utilization_margin)/100)
user_numproc_cond = user_compute_procs[i] < max_jobs_per_gpu[i] or max_jobs_per_gpu[i] == -1
worker_numproc_cond = worker_compute_procs[i] < max_jobs_per_gpu[i] or max_jobs_per_gpu[i] == -1
if self.limits.apply_limits == 'user':
is_cand = tot_util_cond and user_util_cond and user_numproc_cond and tot_memutil_cond
elif self.limits.apply_limits == 'worker':
is_cand = tot_util_cond and worker_numproc_cond and tot_memutil_cond
else:
is_cand = False
print("Invalid apply_limits. Available options are ['user', 'worker']")
if is_cand:
cand_gpu.append(gpuid)
cand_gpu_util.append(total_gpu_utilization_filt[i])
cand_gpumem_util.append(total_gpumem_utilization[i])
# 2. run job process
if len(cand_gpu) == 0 or time.time() - last_job_time < self.limits.time_between_jobs: # no available GPUs or no queued tasks
pass
else:
min_util_idx = cand_gpu_util.index(min(cand_gpu_util))
min_util_cand_gpu = cand_gpu[min_util_idx]
if min_util_cand_gpu < 0: # CPU mode
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
grab_device_success = True
else:
grab_device_success = py3nvml.grab_gpus(num_gpus=1, gpu_select=[cand_gpu[min_util_idx]], gpu_fraction=(100 - self.limits.max_gpu_mem_usage)/100, max_procs=-1) > 0
if not grab_device_success:
# if for some reason cannot allocate gpu
# print('CUDA_VISIBLE_DEVICES = %s'%(os.environ.get('CUDA_VISIBLE_DEVICES')))
# last_job_time = time.time()
continue
try:
job = shared_pending_job_q.get_nowait() # {'tag': , 'config_byte': , 'worker_args': , 'worker_kwargs': }
num_pending -= 1
# {'tag': path, 'config': json.load(f, object_hook=lambda d : SimpleNamespace(**d)),
# 'worker_args': worker_args, 'worker_kwargs': worker_kwargs}
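                    # Ignore SIGINT while the job process is spawned; the worker's own
                    # handler is restored right after p.start()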
signal.signal(signal.SIGINT, signal.SIG_IGN)
# recover namespace object if type is json
tmp_filepath = '/tmp/%s'%(job['tag'].replace('/','_'))
if os.path.splitext(tmp_filepath)[-1] == '.json':
with open(tmp_filepath,'wb') as f:
f.write(job['config_byte'])
with open(tmp_filepath,'r') as f:
job['config'] = json.load(f, object_hook=lambda d : SimpleNamespace(**d))
os.remove(tmp_filepath)
job['worker_kwargs'].update({'config': job['config'], 'config_byte': job['config_byte'], 'config_path': job['tag']})
else:
job['worker_kwargs'].update({'config_byte': job['config_byte'], 'config_path': job['tag']})
p = multiprocessing.Process(
target=self.worker,
args=job['worker_args'],
kwargs=job['worker_kwargs']
)
procs[job['tag']] = p
p.start()
running[job['tag']] = p
signal.signal(signal.SIGINT, self.default_handler)
last_job_time = time.time()
except queue.Empty:
pass
except (EOFError, BrokenPipeError) as e:
print('lost connection to server')
# update thread status
ready = []
for key in running:
if not running[key].is_alive(): # call has been executed
ready.append(key)
if running[key].exitcode == 0: # process terminated successfully
done[key] = running[key]
else: # process terminated with errors
failed[key] = running[key]
for key in ready:
running.pop(key)
procs[key].terminate()
# procs[key].close()
procs.pop(key)
# 3. display status
entry_len = 150
print(''.center(entry_len,'+'))
print(datetime.datetime.now(dateutil.tz.tzlocal()).strftime(' %Y/%m/%d_%H:%M:%S ').center(entry_len,'-'))
# worker status
if self.limits.apply_limits == 'user':
print('+ WORKER: %s (apply limits on user %s)'%(self.name, curr_user))
elif self.limits.apply_limits == 'worker':
print('+ WORKER: %s (apply limits on current worker)'%(self.name))
else:
print("Invalid apply_limits. Available options are ['user', 'worker']")
print(('+ (gpu_ids=%s, job_limit=%s, util_limit=%s%%)'%(list_of_gpus, max_jobs_per_gpu, max_utilization)).ljust(entry_len,' '))
for i, gpuid in enumerate(list_of_gpus):
tup = (gpuid,)
tup += (user_compute_procs[i],)
tup += (worker_compute_procs[i],)
tup += (total_compute_procs[i],)
tup += (user_gpu_utilization[i],)
tup += (total_gpu_utilization[i],)
tup += (user_gpumem_utilization[i],)
tup += (total_gpumem_utilization[i],)
print(('+ gpu%d compute processes (%d(%d)/%d) utilization rate (%d%%/%d%%) memory usage (%d%%/%d%%)'%tup).ljust(entry_len,' '))
# job status
print((' %d PENDING '%(num_pending)).center(entry_len,'-'))
# if self.kwargs.get('logging'):
# print((' %d LOGGING '%(len(running))).center(entry_len,'-'))
# else:
# print((' %d RUNNING '%(len(running))).center(entry_len,'-'))
print((' %d LOGGING/RUNNING '%(len(running))).center(entry_len,'-'))
tqdm_stats = []
for key in running:
name_str = os.path.basename(key)
try:
tqdm_stat = pickle.load(open(os.path.join('/tmp', name_str + '.tqdm'), 'rb'))
tqdm_stats.append(tqdm_stat)
tqdm_str = 'gpu%s pid=%d |%d%%| %d/%d [%s<%s, %sit/s]' % tqdm_stat
except:
tqdm_stats.append(None)
tqdm_str = ''
name_str = '+ ' + name_str
print(name_str + tqdm_str.rjust(entry_len-len(name_str)))
print((' %d FAILED '%(len(failed))).center(entry_len,'-'))
for key in failed: print(os.path.basename(key))
print((' %d DONE '%(len(done))).center(entry_len,'-'))
for key in done: print(os.path.basename(key))
print(''.center(entry_len,'+'))
print('+')
# 4. report status to scheduler
try:
shared_worker_status_q.put({
self.name: {
'limit': vars(self.limits),
'status': {
'worker_compute_procs': user_compute_procs,
'total_compute_procs': total_compute_procs,
'worker_gpu_utilization': user_gpu_utilization,
'total_gpu_utilization': total_gpu_utilization,
'worker_gpumem_utilization': user_gpumem_utilization,
'total_gpumem_utilization': total_gpumem_utilization
},
'running': OrderedDict(((key, tqdm_stat) for key, tqdm_stat in zip(running, tqdm_stats))),
'done': OrderedDict(((key, None) for key in done)),
'failed': OrderedDict(((key, None) for key in failed)),
'last_updated': time.time()
}
})
except (EOFError, BrokenPipeError) as e: # lost connection to server
print('lost connection to server')
# 5. SIGINT(ctrl-c) handler
if self.worker_terminate:
self.worker_resume = prompt_yes_or_no('Resume?')
if self.worker_resume:
IPython.embed()
self.worker_terminate = False
if self.worker_terminate:
for key in running:
running[key].terminate()
break
# run while loop every second
self.rate.sleep()
try: num_pending = shared_pending_job_q.qsize()
except (EOFError, BrokenPipeError) as e: print('lost connection to server') # lost connection to server
print('summary - done: %d, failed: %d, halted: %d, pending: %d' % (len(done), len(failed), len(running), num_pending))
def _signal_handling(self, signum, frame):
self.worker_terminate = True
print('pausing worker... Please wait!')
def worker(self, *args, **kwargs):
if not self.limits.subprocess_verbose:
mute()
self.worker_function(*args, **kwargs)
else:
self.worker_function(*args, **kwargs)
@staticmethod
@abstractmethod
def worker_function(*args, config_path=None, config=None, **kwargs):
pass
NVGPUWorker.register('get_pending_job_q')
NVGPUWorker.register('get_worker_status_q')
if __name__ == '__main__':
class MyWorker(NVGPUWorker):
@staticmethod
def worker_function(*args, config_path=None, config=None, **kwargs):
while True: time.sleep(1)
worker = MyWorker('127.0.0.1', 55555, 'hello')
worker.connect()
worker.update_limits(
available_gpus=[0,1],
gpu_utilization_limit=[100,100],
gpu_job_limit=[0,1],
utilization_margin=0,
time_between_jobs=30,
subprocess_verbose=True,
apply_limits='user'
)
worker.run()
``` |
{
"source": "jigargandhi/adventofcode2017",
"score": 4
} |
#### File: jigargandhi/adventofcode2017/day7.py
```python
class Node():
"""
Defines a program in advent of code day 7 puzzle
"""
def __init__(self, name, weight=None, children=None):
"""
"""
self.name = name
self.weight = weight
self.parent = None
self.total_weight = weight
if children is None:
self.children = []
else:
self.children = children
for child in self.children:
child.parent = self
def add_child(self, node):
node.parent = self
self.children.append(node)
def __repr__(self):
return "Node[name = {}, weight = {}, total_weight = {}]"\
.format(self.name, self.weight, self.total_weight)
def get_total_weight(self):
if len(self.children)==0:
return self.weight
else:
return self.weight + sum([ child.get_total_weight() for child in self.children ])
def find_unbalanced_node(self):
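        # Compare each child's subtree weight with its neighbours; the child whose
        # total weight differs is (or contains) the unbalanced program, so recurse into it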
        if len(self.children) < 2:
            return None
        else:
            differentNode = None
            for i in range(1, len(self.children)-1):
                prev = self.children[i-1].get_total_weight()
                current = self.children[i].get_total_weight()
                next = self.children[i+1].get_total_weight()
                if prev != current and current != next:
                    differentNode = self.children[i]
                    break
                elif prev != current and current == next:
                    differentNode = self.children[i-1]
                    break
                elif prev == current and current != next:
                    differentNode = self.children[i+1]
                    break
                else:
                    continue
            if differentNode is not None:
                still_unbalanced = differentNode.find_unbalanced_node()
if still_unbalanced is None:
return differentNode
else:
return still_unbalanced
return None
class World():
def __init__(self, file_name):
self.programs = {}
self.file_name = file_name
self.raw_programs = {}
def parse_world(self):
with open(self.file_name, mode='r') as f:
for line in f.readlines():
line = line.strip()
key, weight, children = self.line_parser(line)
if key not in self.raw_programs:
self.raw_programs[key] = (weight, children)
else:
weight, current_children = self.raw_programs[key]
self.raw_programs[key] = (
weight, current_children + children)
print("parsing finished")
def prepare_world(self):
# 1st pass
for program_name in self.raw_programs.keys():
weight, _ = self.raw_programs[program_name]
node = Node(program_name, weight, [])
self.programs[program_name] = node
# 2nd pass
for program_name in self.raw_programs.keys():
parent_program = self.programs[program_name]
_, children = self.raw_programs[program_name]
for child_program_name in children:
child_node = self.programs[child_program_name]
parent_program.add_child(child_node)
def line_parser(self, text_input):
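        # Expects lines like "fwft (72) -> ktlj, cntj, xhth" or "pbga (66)" and
        # returns (program_name, weight, children)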
parts = text_input.split("->")
part1 = parts[0].strip()
if len(parts) == 1:
part2 = ""
else:
part2 = parts[1].strip()
part1parts = part1.split("(")
program_name = part1parts[0].strip()
weight = int(part1parts[1][:-1].strip())
if part2 == "":
children = []
else:
children = [k.strip() for k in part2.split(",")]
return program_name, weight, children
def get_random_node(self):
import random
nodeIndex = random.randint(0, len(self.programs)-1)
randomr_key = list(self.programs.keys())[nodeIndex]
node = self.programs[randomr_key]
return node
def propogate_weight(self):
pass
def traverse_node(node):
if node.parent is None:
return node
else:
return traverse_node(node.parent)
world = World("day7_input2.txt")
world.parse_world()
world.prepare_world()
node = world.get_random_node()
world.propogate_weight()
root = traverse_node(node)
# for child in root.children:
# print("{} ({}) ({}) ".format(child.name, child.weight, child.get_total_weight()))
print(root.find_unbalanced_node())
``` |
{
"source": "jigargandhi/UdemyMachineLearning",
"score": 3
} |
#### File: UdemyMachineLearning/Titanic/kernel.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
train = pd.read_csv("input/train.csv")
test = pd.read_csv("input/test.csv")
X = train.iloc[:,[2,4,5]].values
Y = train.iloc[:,[1]].values
Z = test.iloc[:,[1,3,4]].values
"""
Variables of interest:
pClass, Sex, Age,
Treatment:
pClass: ordinal variable, no treatment required
Sex: LabelEncoder; OneHotEncoder not required as values are only male and female
Age: impute with mean
"""
# Label Encoding Sex
from sklearn.preprocessing import LabelEncoder
labelEncoder = LabelEncoder()
X[:,1] = labelEncoder.fit_transform(X[:,1])
Z[:,1]= labelEncoder.transform(Z[:,1])
# Imputing Age
from sklearn.preprocessing import Imputer
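# NOTE: Imputer was removed in later scikit-learn releases;
# sklearn.impute.SimpleImputer is the modern equivalent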
imputer = Imputer()
imputer.fit(X[:,[2]])
X[:,[2]]= imputer.transform(X[:,[2]])
Z[:,[2]] = imputer.transform(Z[:,[2]])
"""
Models:
LogisticRegression, SVM, KernelSVM, Naive Bayes, Decision Tree, Random Forest
"""
def printAccuracy(d):
return (d[0,0]+d[1,1])/(d[0,0]+d[1,1]+d[0,1]+d[1,0])
# Logistic Regression
from sklearn.cross_validation import train_test_split
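# NOTE: sklearn.cross_validation was removed in later releases;
# train_test_split now lives in sklearn.model_selection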
X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size = 0.2, random_state = 0)
from sklearn.linear_model import LogisticRegression
linear_classifier = LogisticRegression()
linear_classifier .fit(X_train, y_train)
y_pred = linear_classifier .predict(X_test)
from sklearn.metrics import confusion_matrix
linear_cm= confusion_matrix(y_test, y_pred)
# [[93,17],[21,48]]
#SVM
from sklearn.svm import SVC
svc_classifier = SVC(kernel = 'rbf', random_state = 0)
svc_classifier.fit(X_train, y_train)
y_pred= svc_classifier.predict(X_test)
svc_cm = confusion_matrix(y_test, y_pred)
#81%
#RandomForest
from sklearn.ensemble import RandomForestClassifier
rf_classifier = RandomForestClassifier(criterion='entropy', random_state=0)
rf_classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = rf_classifier.predict(X_test)
rf_cm = confusion_matrix(y_test,y_pred)
# Naive Bayes
from sklearn.naive_bayes import GaussianNB
gnb_classifier = GaussianNB()
gnb_classifier.fit(X_train, y_train)
y_pred = gnb_classifier.predict(X_test)
gnb_cm = confusion_matrix(y_test, y_pred)
# 82%
"""
Evaluation:
a. Confusion Matrix, CAP Curve (how?)
"""
print("Linear: ",printAccuracy(linear_cm))
print("SVM: ",printAccuracy(svc_cm))
print("Random Forest: ",printAccuracy(rf_cm))
print("Naive Bayes:", printAccuracy(gnb_cm))
"""
Writing Random Forest result to Output
"""
y_result = rf_classifier.predict(Z)
result = pd.DataFrame({"PassengerId":test.iloc[:,0].values, "Survived": y_result})
result.to_csv("output/result.csv",index = None)
``` |
{
"source": "JigarJoshi04/tardis",
"score": 3
} |
#### File: tardis/widgets/kromer_plot.py
```python
import numpy as np
import pandas as pd
import astropy.units as u
import astropy.modeling.blackbody as abb
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as clr
import plotly.graph_objects as go
from tardis.util.base import atomic_number2element_symbol
from tardis.widgets import plot_util as pu
class KromerData:
"""The data of simulation model which is used by Kromer Plot.
This preprocesses the data required by KromerPlotter class for doing
calculations and plotting.
"""
def __init__(
self,
last_interaction_type,
last_line_interaction_in_id,
last_line_interaction_out_id,
last_line_interaction_in_nu,
lines_df,
packet_nus,
packet_energies,
r_inner,
spectrum_delta_frequency,
spectrum_frequency_bins, # stores _frequency (bin edges) not frequency
spectrum_luminosity_density_lambda,
spectrum_wavelength,
t_inner,
time_of_simulation,
):
"""
Initialize the KromerData with required properties of simulation model.
Parameters
----------
last_interaction_type : np.array
Interaction type (no-interaction: -1, e-scattering: 1 and
line interaction: 2) values of emitted packets
last_line_interaction_in_id : np.array
IDs of atomic lines with which emitted packet had their last
absorption (interaction in)
last_line_interaction_out_id : np.array
IDs of atomic lines with which emitted packet had their last
emission (interaction out)
last_line_interaction_in_nu : np.array
Frequency values of the last absorption of emitted packets
lines_df : pd.DataFrame
Data about the atomic lines present in simulation model's plasma
packet_nus : astropy.Quantity
Frequency values of the last emission of emitted packets, having
unit of Hz
packet_energies : astropy.Quantity
Energy values of emitted packets, having unit of erg
r_inner : astropy.Quantity
Radius of innermost shell, having unit of cm
spectrum_delta_frequency : astropy.Quantity
Frequency bin width of spectrum, having unit of Hz
spectrum_frequency_bins : astropy.Quantity
Frequency bin edges of spectrum, having unit of Hz
spectrum_wavelength : astropy.Quantity
Wavelength values of spectrum, having unit of Angstrom
t_inner : astropy.Quantity
Temperature of innermost shell, having unit of K
time_of_simulation : astropy.Quantity
Time of simulation, having unit of s (second)
"""
# Save packets properties in a dataframe for easier data manipulation
self.packets_df = pd.DataFrame(
{
"nus": packet_nus,
"lambdas": packet_nus.to("angstrom", u.spectral()),
"energies": packet_energies,
"last_interaction_type": last_interaction_type,
"last_line_interaction_out_id": last_line_interaction_out_id,
"last_line_interaction_in_id": last_line_interaction_in_id,
"last_line_interaction_in_nu": last_line_interaction_in_nu,
}
)
# Save other properties
self.lines_df = lines_df
self.r_inner = r_inner
self.spectrum_delta_frequency = spectrum_delta_frequency
self.spectrum_frequency_bins = spectrum_frequency_bins
self.spectrum_frequency = spectrum_frequency_bins[:-1]
self.spectrum_luminosity_density_lambda = (
spectrum_luminosity_density_lambda
)
self.spectrum_wavelength = spectrum_wavelength
self.t_inner = t_inner
self.time_of_simulation = time_of_simulation
# Create dataframe of packets that experience line interaction
line_mask = (self.packets_df["last_interaction_type"] > -1) & (
self.packets_df["last_line_interaction_in_id"] > -1
) # & operator is quite faster than np.logical_and on pd.Series
self.packets_df_line_interaction = self.packets_df.loc[line_mask].copy()
# Add columns for atomic number of last interaction in/out
self.packets_df_line_interaction["last_line_interaction_out_atom"] = (
self.lines_df["atomic_number"]
.iloc[
self.packets_df_line_interaction["last_line_interaction_out_id"]
]
.to_numpy()
)
self.packets_df_line_interaction["last_line_interaction_in_atom"] = (
self.lines_df["atomic_number"]
.iloc[
self.packets_df_line_interaction["last_line_interaction_in_id"]
]
.to_numpy()
)
@classmethod
def from_simulation(cls, sim, packets_mode):
"""
Create an instance of KromerData from a TARDIS simulation object.
Parameters
----------
sim : tardis.simulation.Simulation
TARDIS Simulation object produced by running a simulation
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
Returns
-------
KromerData
"""
# Properties common among both packet modes
lines_df = sim.plasma.atomic_data.lines.reset_index().set_index(
"line_id"
)
r_inner = sim.model.r_inner
t_inner = sim.model.t_inner
time_of_simulation = sim.runner.time_of_simulation
if packets_mode == "virtual":
return cls(
last_interaction_type=sim.runner.virt_packet_last_interaction_type,
last_line_interaction_in_id=sim.runner.virt_packet_last_line_interaction_in_id,
last_line_interaction_out_id=sim.runner.virt_packet_last_line_interaction_out_id,
last_line_interaction_in_nu=sim.runner.virt_packet_last_interaction_in_nu,
lines_df=lines_df,
packet_nus=u.Quantity(sim.runner.virt_packet_nus, "Hz"),
packet_energies=u.Quantity(
sim.runner.virt_packet_energies, "erg"
),
r_inner=r_inner,
spectrum_delta_frequency=sim.runner.spectrum_virtual.delta_frequency,
spectrum_frequency_bins=sim.runner.spectrum_virtual._frequency,
spectrum_luminosity_density_lambda=sim.runner.spectrum_virtual.luminosity_density_lambda,
spectrum_wavelength=sim.runner.spectrum_virtual.wavelength,
t_inner=t_inner,
time_of_simulation=time_of_simulation,
)
elif packets_mode == "real":
# Packets-specific properties need to be only for those packets
# which got emitted
return cls(
last_interaction_type=sim.runner.last_interaction_type[
sim.runner.emitted_packet_mask
],
last_line_interaction_in_id=sim.runner.last_line_interaction_in_id[
sim.runner.emitted_packet_mask
],
last_line_interaction_out_id=sim.runner.last_line_interaction_out_id[
sim.runner.emitted_packet_mask
],
last_line_interaction_in_nu=sim.runner.last_interaction_in_nu[
sim.runner.emitted_packet_mask
],
lines_df=lines_df,
packet_nus=sim.runner.output_nu[sim.runner.emitted_packet_mask],
packet_energies=sim.runner.output_energy[
sim.runner.emitted_packet_mask
],
r_inner=r_inner,
spectrum_delta_frequency=sim.runner.spectrum.delta_frequency,
spectrum_frequency_bins=sim.runner.spectrum._frequency,
spectrum_luminosity_density_lambda=sim.runner.spectrum.luminosity_density_lambda,
spectrum_wavelength=sim.runner.spectrum.wavelength,
t_inner=t_inner,
time_of_simulation=time_of_simulation,
)
else:
raise ValueError(
"Invalid value passed to packets_mode. Only "
"allowed values are 'virtual' or 'real'"
)
@classmethod
def from_hdf(cls, hdf_fpath, packets_mode):
"""
Create an instance of KromerData from a simulation HDF file.
Parameters
----------
hdf_fpath : str
Valid path to the HDF file where simulation is saved
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
Returns
-------
KromerData
"""
with pd.HDFStore(hdf_fpath, "r") as hdf:
lines_df = (
hdf["/simulation/plasma/lines"]
.reset_index()
.set_index("line_id")
)
r_inner = u.Quantity(
hdf["/simulation/model/r_inner"].to_numpy(), "cm"
) # Convert pd.Series to np.array to construct quantity from it
t_inner = u.Quantity(hdf["/simulation/model/scalars"].t_inner, "K")
time_of_simulation = u.Quantity(
hdf["/simulation/runner/scalars"].time_of_simulation, "s"
)
if packets_mode == "virtual":
return cls(
last_interaction_type=hdf[
"/simulation/runner/virt_packet_last_interaction_type"
],
last_line_interaction_in_id=hdf[
"/simulation/runner/virt_packet_last_line_interaction_in_id"
],
last_line_interaction_out_id=hdf[
"/simulation/runner/virt_packet_last_line_interaction_out_id"
],
last_line_interaction_in_nu=u.Quantity(
hdf[
"/simulation/runner/virt_packet_last_interaction_in_nu"
].to_numpy(),
"Hz",
),
lines_df=lines_df,
packet_nus=u.Quantity(
hdf["/simulation/runner/virt_packet_nus"].to_numpy(),
"Hz",
),
packet_energies=u.Quantity(
hdf[
"/simulation/runner/virt_packet_energies"
].to_numpy(),
"erg",
),
r_inner=r_inner,
spectrum_delta_frequency=u.Quantity(
hdf[
"/simulation/runner/spectrum_virtual/scalars"
].delta_frequency,
"Hz",
),
spectrum_frequency_bins=u.Quantity(
hdf[
"/simulation/runner/spectrum_virtual/_frequency"
].to_numpy(),
"Hz",
),
spectrum_luminosity_density_lambda=u.Quantity(
hdf[
"/simulation/runner/spectrum_virtual/luminosity_density_lambda"
].to_numpy(),
"erg / s cm", # luminosity_density_lambda is saved in hdf in CGS
).to("erg / s AA"),
spectrum_wavelength=u.Quantity(
hdf[
"/simulation/runner/spectrum_virtual/wavelength"
].to_numpy(),
"cm", # wavelength is saved in hdf in CGS
).to("AA"),
t_inner=t_inner,
time_of_simulation=time_of_simulation,
)
elif packets_mode == "real":
emitted_packet_mask = hdf[
"/simulation/runner/emitted_packet_mask"
].to_numpy()
return cls(
# First convert series read from hdf to array before masking
# to eliminate index info which creates problems otherwise
last_interaction_type=hdf[
"/simulation/runner/last_interaction_type"
].to_numpy()[emitted_packet_mask],
last_line_interaction_in_id=hdf[
"/simulation/runner/last_line_interaction_in_id"
].to_numpy()[emitted_packet_mask],
last_line_interaction_out_id=hdf[
"/simulation/runner/last_line_interaction_out_id"
].to_numpy()[emitted_packet_mask],
last_line_interaction_in_nu=u.Quantity(
hdf[
"/simulation/runner/last_interaction_in_nu"
].to_numpy()[emitted_packet_mask],
"Hz",
),
lines_df=lines_df,
packet_nus=u.Quantity(
hdf["/simulation/runner/output_nu"].to_numpy()[
emitted_packet_mask
],
"Hz",
),
packet_energies=u.Quantity(
hdf["/simulation/runner/output_energy"].to_numpy()[
emitted_packet_mask
],
"erg",
),
r_inner=r_inner,
spectrum_delta_frequency=u.Quantity(
hdf[
"/simulation/runner/spectrum/scalars"
].delta_frequency,
"Hz",
),
spectrum_frequency_bins=u.Quantity(
hdf[
"/simulation/runner/spectrum/_frequency"
].to_numpy(),
"Hz",
),
spectrum_luminosity_density_lambda=u.Quantity(
hdf[
"/simulation/runner/spectrum/luminosity_density_lambda"
].to_numpy(),
"erg / s cm",
).to("erg / s AA"),
spectrum_wavelength=u.Quantity(
hdf[
"/simulation/runner/spectrum/wavelength"
].to_numpy(),
"cm",
).to("AA"),
t_inner=t_inner,
time_of_simulation=time_of_simulation,
)
else:
raise ValueError(
"Invalid value passed to packets_mode. Only "
"allowed values are 'virtual' or 'real'"
)
class KromerPlotter:
"""Plotting interface to generate Kromer Plot for a simulation model."""
def __init__(self, data):
"""
Initialize the KromerPlotter with required data of simulation model.
Parameters
----------
data : dict of KromerData
Dictionary to store data required for Kromer plot, for both packet
modes i.e. real and virtual
"""
self.data = data
@classmethod
def from_simulation(cls, sim):
"""
Create an instance of KromerPlotter from a TARDIS simulation object.
Parameters
----------
sim : tardis.simulation.Simulation
TARDIS Simulation object produced by running a simulation
Returns
-------
KromerPlotter
"""
return cls(
dict(
virtual=KromerData.from_simulation(sim, "virtual"),
real=KromerData.from_simulation(sim, "real"),
)
)
@classmethod
def from_hdf(cls, hdf_fpath):
"""
Create an instance of KromerPlotter from a simulation HDF file.
Parameters
----------
hdf_fpath : str
Valid path to the HDF file where simulation is saved
Returns
-------
KromerPlotter
"""
return cls(
dict(
virtual=KromerData.from_hdf(hdf_fpath, "virtual"),
real=KromerData.from_hdf(hdf_fpath, "real"),
)
)
def _calculate_plotting_data(
self, packets_mode, packet_wvl_range, distance
):
"""
Calculate data to be used in plotting based on parameters passed.
Parameters
----------
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
packet_wvl_range : astropy.Quantity
Wavelength range to restrict the analysis of escaped packets. It
should be a quantity having units of Angstrom, containing two
values - lower lambda and upper lambda i.e.
[lower_lambda, upper_lambda] * u.AA
distance : astropy.Quantity
Distance used to calculate flux instead of luminosities in the plot.
            Preferably having units of cm.
Notes
-----
        It doesn't return the calculated properties but saves them in the
        instance itself. So it should always be called before starting to plot,
        in order to update the plotting data based on the parameters passed.
"""
if packets_mode not in ["virtual", "real"]:
raise ValueError(
"Invalid value passed to packets_mode. Only "
"allowed values are 'virtual' or 'real'"
)
# Store the plottable range of each spectrum property which is
# same as entire range, initially
self.plot_frequency_bins = self.data[
packets_mode
].spectrum_frequency_bins
self.plot_wavelength = self.data[packets_mode].spectrum_wavelength
self.plot_frequency = self.data[packets_mode].spectrum_frequency
# Filter their plottable range based on packet_wvl_range specified
if packet_wvl_range:
packet_nu_range = packet_wvl_range.to("Hz", u.spectral())
# Index of value just before the 1st value that is > packet_nu_range[1]
start_idx = (
np.argmax(self.plot_frequency_bins > packet_nu_range[1]) - 1
)
# Index of value just after the last value that is < packet_nu_range[0]
end_idx = np.argmin(self.plot_frequency_bins < packet_nu_range[0])
self.plot_frequency_bins = self.plot_frequency_bins[
start_idx : end_idx + 1
]
# Since spectrum frequency (& hence wavelength) were created from
# frequency_bins[:-1], so we exclude end_idx when creating the mask
self.packet_wvl_range_mask = np.zeros(
self.plot_wavelength.size, dtype=bool
)
self.packet_wvl_range_mask[start_idx:end_idx] = True
self.plot_wavelength = self.plot_wavelength[
self.packet_wvl_range_mask
]
self.plot_frequency = self.plot_frequency[
self.packet_wvl_range_mask
]
else:
self.packet_wvl_range_mask = np.ones(
self.plot_wavelength.size, dtype=bool
)
        # Make sure the number of bin edges is always one more than the number of wavelengths
assert self.plot_frequency_bins.size == self.plot_wavelength.size + 1
# Calculate the area term to convert luminosity to flux
if distance is None:
self.lum_to_flux = 1 # so that this term will have no effect
else:
self.lum_to_flux = 4.0 * np.pi * (distance.to("cm")) ** 2
# Calculate luminosities to be shown in plot
(
self.emission_luminosities_df,
self.elements,
) = self._calculate_emission_luminosities(
packets_mode=packets_mode, packet_wvl_range=packet_wvl_range
)
self.absorption_luminosities_df = (
self._calculate_absorption_luminosities(
packets_mode=packets_mode, packet_wvl_range=packet_wvl_range
)
)
self.photosphere_luminosity = self._calculate_photosphere_luminosity(
packets_mode=packets_mode
)
self.modeled_spectrum_luminosity = (
self.data[packets_mode].spectrum_luminosity_density_lambda[
self.packet_wvl_range_mask
]
/ self.lum_to_flux
)
def _calculate_emission_luminosities(self, packets_mode, packet_wvl_range):
"""
Calculate luminosities for the emission part of Kromer plot.
Parameters
----------
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
packet_wvl_range : astropy.Quantity
Wavelength range to restrict the analysis of escaped packets. It
should be a quantity having units of Angstrom, containing two
values - lower lambda and upper lambda i.e.
[lower_lambda, upper_lambda] * u.AA
Returns
-------
luminosities_df : pd.DataFrame
Dataframe containing luminosities contributed by no interaction,
only e-scattering and emission with each element present
elements_present: np.array
Atomic numbers of the elements with which packets of specified
wavelength range interacted
"""
# Calculate masks to be applied on packets data based on packet_wvl_range
if packet_wvl_range is None:
self.packet_nu_range_mask = np.ones(
self.data[packets_mode].packets_df.shape[0], dtype=bool
)
self.packet_nu_line_range_mask = np.ones(
self.data[packets_mode].packets_df_line_interaction.shape[0],
dtype=bool,
)
else:
packet_nu_range = packet_wvl_range.to("Hz", u.spectral())
self.packet_nu_range_mask = (
self.data[packets_mode].packets_df["nus"] < packet_nu_range[0]
) & (self.data[packets_mode].packets_df["nus"] > packet_nu_range[1])
self.packet_nu_line_range_mask = (
self.data[packets_mode].packets_df_line_interaction["nus"]
< packet_nu_range[0]
) & (
self.data[packets_mode].packets_df_line_interaction["nus"]
> packet_nu_range[1]
)
# Histogram weights are packet luminosities or flux
weights = (
self.data[packets_mode].packets_df["energies"][
self.packet_nu_range_mask
]
/ self.lum_to_flux
) / self.data[packets_mode].time_of_simulation
luminosities_df = pd.DataFrame(index=self.plot_wavelength)
# Contribution of packets which experienced no interaction ------------
# Mask to select packets with no interaction
mask_noint = (
self.data[packets_mode].packets_df["last_interaction_type"][
self.packet_nu_range_mask
]
== -1
)
# Calculate weighted histogram of packet frequencies for
# plottable range of frequency bins
hist_noint = np.histogram(
self.data[packets_mode].packets_df["nus"][
self.packet_nu_range_mask
][mask_noint],
bins=self.plot_frequency_bins,
weights=weights[mask_noint],
density=False,
)
# Convert histogram (luminosity values) to luminosity density lambda
L_nu_noint = (
hist_noint[0]
* u.erg
/ u.s
/ self.data[packets_mode].spectrum_delta_frequency
)
L_lambda_noint = L_nu_noint * self.plot_frequency / self.plot_wavelength
# Save it in df
luminosities_df["noint"] = L_lambda_noint.value
# Contribution of packets which only experienced electron scattering ---
mask_escatter = (
self.data[packets_mode].packets_df["last_interaction_type"][
self.packet_nu_range_mask
]
== 1
) & (
self.data[packets_mode].packets_df["last_line_interaction_in_id"][
self.packet_nu_range_mask
]
== -1
)
hist_escatter = np.histogram(
self.data[packets_mode].packets_df["nus"][
self.packet_nu_range_mask
][mask_escatter],
bins=self.plot_frequency_bins,
weights=weights[mask_escatter],
density=False,
)
L_nu_escatter = (
hist_escatter[0]
* u.erg
/ u.s
/ self.data[packets_mode].spectrum_delta_frequency
)
L_lambda_escatter = (
L_nu_escatter * self.plot_frequency / self.plot_wavelength
)
luminosities_df["escatter"] = L_lambda_escatter.value
# Group packets_df by atomic number of elements with which packets
# had their last emission (interaction out)
packets_df_grouped = (
self.data[packets_mode]
.packets_df_line_interaction.loc[self.packet_nu_line_range_mask]
.groupby(by="last_line_interaction_out_atom")
)
# Contribution of each element with which packets interacted ----------
for atomic_number, group in packets_df_grouped:
# Histogram of specific element
hist_el = np.histogram(
group["nus"],
bins=self.plot_frequency_bins,
weights=group["energies"]
/ self.data[packets_mode].time_of_simulation,
)
# Convert to luminosity density lambda
L_nu_el = (
hist_el[0]
* u.erg
/ u.s
/ self.data[packets_mode].spectrum_delta_frequency
)
L_lambda_el = L_nu_el * self.plot_frequency / self.plot_wavelength
luminosities_df[atomic_number] = L_lambda_el.value
# Create an array of the elements with which packets interacted
elements_present = np.array(list(packets_df_grouped.groups.keys()))
return luminosities_df, elements_present
def _calculate_absorption_luminosities(
self, packets_mode, packet_wvl_range
):
"""
Calculate luminosities for the absorption part of Kromer plot.
Parameters
----------
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
packet_wvl_range : astropy.Quantity
Wavelength range to restrict the analysis of escaped packets. It
should be a quantity having units of Angstrom, containing two
values - lower lambda and upper lambda i.e.
[lower_lambda, upper_lambda] * u.AA
Returns
-------
pd.DataFrame
Dataframe containing luminosities contributed by absorption with
each element present
"""
# Calculate masks to be applied on packets data based on packet_wvl_range
if packet_wvl_range is None:
self.packet_nu_line_range_mask = np.ones(
self.data[packets_mode].packets_df_line_interaction.shape[0],
dtype=bool,
)
else:
packet_nu_range = packet_wvl_range.to("Hz", u.spectral())
self.packet_nu_line_range_mask = (
self.data[packets_mode].packets_df_line_interaction[
"last_line_interaction_in_nu"
]
< packet_nu_range[0]
) & (
self.data[packets_mode].packets_df_line_interaction[
"last_line_interaction_in_nu"
]
> packet_nu_range[1]
)
luminosities_df = pd.DataFrame(index=self.plot_wavelength)
# Group packets_df by atomic number of elements with which packets
# had their last absorption (interaction in)
packets_df_grouped = (
self.data[packets_mode]
.packets_df_line_interaction.loc[self.packet_nu_line_range_mask]
.groupby(by="last_line_interaction_in_atom")
)
for atomic_number, group in packets_df_grouped:
# Histogram of specific element
hist_el = np.histogram(
group["last_line_interaction_in_nu"],
bins=self.plot_frequency_bins,
weights=group["energies"]
/ self.data[packets_mode].time_of_simulation,
)
# Convert to luminosity density lambda
L_nu_el = (
hist_el[0]
* u.erg
/ u.s
/ self.data[packets_mode].spectrum_delta_frequency
)
L_lambda_el = L_nu_el * self.plot_frequency / self.plot_wavelength
luminosities_df[atomic_number] = L_lambda_el.value
return luminosities_df
def _calculate_photosphere_luminosity(self, packets_mode):
"""
Calculate blackbody luminosity of the photosphere.
Parameters
----------
packets_mode : {'virtual', 'real'}
Mode of packets to be considered, either real or virtual
Returns
-------
astropy.Quantity
Luminosity density lambda (or Flux) of photosphere (inner boundary
of TARDIS simulation)
"""
L_lambda_ph = (
abb.blackbody_lambda(
self.plot_wavelength,
self.data[packets_mode].t_inner,
)
* 4
* np.pi ** 2
* self.data[packets_mode].r_inner[0] ** 2
* u.sr
).to("erg / (AA s)")
return L_lambda_ph / self.lum_to_flux
def generate_plot_mpl(
self,
packets_mode="virtual",
packet_wvl_range=None,
distance=None,
show_modeled_spectrum=True,
ax=None,
figsize=(10, 7),
cmapname="jet",
):
"""
Generate Kromer Plot using matplotlib.
Parameters
----------
packets_mode : {'virtual', 'real'}, optional
Mode of packets to be considered, either real or virtual. Default
value is 'virtual'
packet_wvl_range : astropy.Quantity or None, optional
Wavelength range to restrict the analysis of escaped packets. It
should be a quantity having units of Angstrom, containing two
values - lower lambda and upper lambda i.e.
[lower_lambda, upper_lambda] * u.AA. Default value is None
distance : astropy.Quantity or None, optional
Distance used to calculate flux instead of luminosities in the plot.
            Preferably having units of cm. Default value is None
show_modeled_spectrum : bool, optional
Whether to show modeled spectrum in Kromer Plot. Default value is
True
ax : matplotlib.axes._subplots.AxesSubplot or None, optional
Axis on which to create plot. Default value is None which will
create plot on a new figure's axis.
figsize : tuple, optional
Size of the matplotlib figure to display. Default value is (10, 7)
cmapname : str, optional
Name of matplotlib colormap to be used for showing elements.
Default value is "jet"
Returns
-------
matplotlib.axes._subplots.AxesSubplot
Axis on which Kromer Plot is created
"""
# Calculate data attributes required for plotting
# and save them in instance itself
self._calculate_plotting_data(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
)
if ax is None:
self.ax = plt.figure(figsize=figsize).add_subplot(111)
else:
self.ax = ax
# Set colormap to be used in elements of emission and absorption plots
self.cmap = cm.get_cmap(cmapname, self.elements.size)
self._plot_emission_mpl()
self._plot_absorption_mpl()
# Plot modeled spectrum
if show_modeled_spectrum:
self.ax.plot(
self.plot_wavelength,
self.modeled_spectrum_luminosity,
"--b",
label=f"{packets_mode.capitalize()} Spectrum",
linewidth=1,
)
# Plot photosphere
self.ax.plot(
self.plot_wavelength,
self.photosphere_luminosity,
"--r",
label="Blackbody Photosphere",
)
self._show_colorbar_mpl()
# Set legends and labels
self.ax.legend(fontsize=12)
self.ax.set_xlabel(r"Wavelength $[\AA]$", fontsize=12)
if distance: # Set y-axis label for flux
self.ax.set_ylabel(
r"$F_{\lambda}$ [erg/s/$\AA/cm^{2}$]", fontsize=12
)
else: # Set y-axis label for luminosity
self.ax.set_ylabel(r"$L_{\lambda}$ [erg/s/$\AA$]", fontsize=12)
return plt.gca()
def _plot_emission_mpl(self):
"""Plot emission part of the Kromer Plot using matplotlib."""
# To create stacked area chart in matplotlib, we will start with zero
# lower level and will keep adding luminosities to it (upper level)
lower_level = np.zeros(self.emission_luminosities_df.shape[0])
upper_level = (
lower_level + self.emission_luminosities_df.noint.to_numpy()
)
self.ax.fill_between(
self.plot_wavelength,
lower_level,
upper_level,
color="black",
label="No interaction",
)
lower_level = upper_level
upper_level = (
lower_level + self.emission_luminosities_df.escatter.to_numpy()
)
self.ax.fill_between(
self.plot_wavelength,
lower_level,
upper_level,
color="grey",
label="Electron Scatter Only",
)
elements_z = self.emission_luminosities_df.columns[2:].to_list()
nelements = len(elements_z)
# Contribution from each element
for i, atomic_number in enumerate(elements_z):
lower_level = upper_level
upper_level = (
lower_level
+ self.emission_luminosities_df[atomic_number].to_numpy()
)
self.ax.fill_between(
self.plot_wavelength,
lower_level,
upper_level,
color=self.cmap(i / nelements),
cmap=self.cmap,
linewidth=0,
)
def _plot_absorption_mpl(self):
"""Plot absorption part of the Kromer Plot using matplotlib."""
lower_level = np.zeros(self.absorption_luminosities_df.shape[0])
elements_z = self.absorption_luminosities_df.columns.to_list()
for i, atomic_number in enumerate(elements_z):
# To plot absorption part along -ve X-axis, we will start with
# zero upper level and keep subtracting luminosities to it (lower
# level) - fill from upper to lower level
upper_level = lower_level
lower_level = (
upper_level
- self.absorption_luminosities_df[atomic_number].to_numpy()
)
self.ax.fill_between(
self.plot_wavelength,
upper_level,
lower_level,
color=self.cmap(i / len(elements_z)),
cmap=self.cmap,
linewidth=0,
)
def _show_colorbar_mpl(self):
"""Show matplotlib colorbar with labels of elements mapped to colors."""
color_values = [
self.cmap(i / self.elements.size) for i in range(self.elements.size)
]
custcmap = clr.ListedColormap(color_values)
norm = clr.Normalize(vmin=0, vmax=self.elements.size)
mappable = cm.ScalarMappable(norm=norm, cmap=custcmap)
mappable.set_array(np.linspace(1, self.elements.size + 1, 256))
cbar = plt.colorbar(mappable, ax=self.ax)
bounds = np.arange(self.elements.size) + 0.5
cbar.set_ticks(bounds)
elements_name = [
atomic_number2element_symbol(atomic_num)
for atomic_num in self.elements
]
cbar.set_ticklabels(elements_name)
def generate_plot_ply(
self,
packets_mode="virtual",
packet_wvl_range=None,
distance=None,
observed_spectrum=None,
show_modeled_spectrum=True,
fig=None,
graph_height=600,
cmapname="jet",
):
"""
Generate interactive Kromer Plot using plotly.
Parameters
----------
packets_mode : {'virtual', 'real'}, optional
Mode of packets to be considered, either real or virtual. Default
value is 'virtual'
packet_wvl_range : astropy.Quantity or None, optional
Wavelength range to restrict the analysis of escaped packets. It
should be a quantity having units of Angstrom, containing two
values - lower lambda and upper lambda i.e.
[lower_lambda, upper_lambda] * u.AA. Default value is None
distance : astropy.Quantity or None, optional
Distance used to calculate flux instead of luminosities in the plot.
            Preferably having units of cm. Default value is None
show_modeled_spectrum : bool, optional
Whether to show modeled spectrum in Kromer Plot. Default value is
True
fig : plotly.graph_objs._figure.Figure or None, optional
Figure object on which to create plot. Default value is None which
will create plot on a new Figure object.
graph_height : int, optional
Height (in px) of the plotly graph to display. Default value is 600
cmapname : str, optional
Name of the colormap to be used for showing elements.
Default value is "jet"
Returns
-------
plotly.graph_objs._figure.Figure
Figure object on which Kromer Plot is created
"""
# Calculate data attributes required for plotting
# and save them in instance itself
self._calculate_plotting_data(
packets_mode=packets_mode,
packet_wvl_range=packet_wvl_range,
distance=distance,
)
if fig is None:
self.fig = go.Figure()
else:
self.fig = fig
# Set colormap to be used in elements of emission and absorption plots
self.cmap = cm.get_cmap(cmapname, self.elements.size)
self._plot_emission_ply()
self._plot_absorption_ply()
# Plot modeled spectrum
if show_modeled_spectrum:
self.fig.add_trace(
go.Scatter(
x=self.plot_wavelength,
y=self.modeled_spectrum_luminosity,
mode="lines",
line=dict(
color="blue",
width=1,
),
name=f"{packets_mode.capitalize()} Spectrum",
)
)
# Plot photosphere
self.fig.add_trace(
go.Scatter(
x=self.plot_wavelength,
y=self.photosphere_luminosity,
mode="lines",
line=dict(width=1.5, color="red", dash="dash"),
name="Blackbody Photosphere",
)
)
self._show_colorbar_ply()
# Set label and other layout options
xlabel = pu.axis_label_in_latex("Wavelength", u.AA)
if distance: # Set y-axis label for flux
ylabel = pu.axis_label_in_latex(
"F_{\\lambda}", u.Unit("erg/(s cm**2 AA)"), only_text=False
)
else: # Set y-axis label for luminosity
ylabel = pu.axis_label_in_latex(
"L_{\\lambda}", u.Unit("erg/(s AA)"), only_text=False
)
self.fig.update_layout(
xaxis=dict(
title=xlabel,
exponentformat="none",
),
yaxis=dict(title=ylabel, exponentformat="e"),
height=graph_height,
)
return self.fig
@staticmethod
def to_rgb255_string(color_tuple):
"""
Convert a matplotlib RGBA tuple to a generic RGB 255 string.
Parameters
----------
color_tuple : tuple
Matplotlib RGBA tuple of float values in closed interval [0, 1]
Returns
-------
str
RGB string of format rgb(r,g,b) where r,g,b are integers between
0 and 255 (both inclusive)
"""
color_tuple_255 = tuple([int(x * 255) for x in color_tuple[:3]])
return f"rgb{color_tuple_255}"
def _plot_emission_ply(self):
"""Plot emission part of the Kromer Plot using plotly."""
# By specifying a common stackgroup, plotly will itself add up
        # luminosities, in order, to create a stacked area chart
self.fig.add_trace(
go.Scatter(
x=self.emission_luminosities_df.index,
y=self.emission_luminosities_df.noint,
mode="none",
name="No interaction",
fillcolor="black",
stackgroup="emission",
)
)
self.fig.add_trace(
go.Scatter(
x=self.emission_luminosities_df.index,
y=self.emission_luminosities_df.escatter,
mode="none",
name="Electron Scatter Only",
fillcolor="grey",
stackgroup="emission",
)
)
elements_z = self.emission_luminosities_df.columns[2:]
nelements = len(elements_z)
for i, atomic_num in enumerate(elements_z):
self.fig.add_trace(
go.Scatter(
x=self.emission_luminosities_df.index,
y=self.emission_luminosities_df[atomic_num],
mode="none",
name=atomic_number2element_symbol(atomic_num),
fillcolor=self.to_rgb255_string(self.cmap(i / nelements)),
stackgroup="emission",
showlegend=False,
)
)
def _plot_absorption_ply(self):
"""Plot absorption part of the Kromer Plot using plotly."""
elements_z = self.absorption_luminosities_df.columns
nelements = len(elements_z)
for i, atomic_num in enumerate(elements_z):
self.fig.add_trace(
go.Scatter(
x=self.absorption_luminosities_df.index,
# to plot absorption luminosities along negative y-axis
y=self.absorption_luminosities_df[atomic_num] * -1,
mode="none",
name=atomic_number2element_symbol(atomic_num),
fillcolor=self.to_rgb255_string(self.cmap(i / nelements)),
stackgroup="absorption",
showlegend=False,
)
)
def _show_colorbar_ply(self):
"""Show plotly colorbar with labels of elements mapped to colors."""
# Interpolate [0, 1] range to create bins equal to number of elements
colorscale_bins = np.linspace(0, 1, num=self.elements.size + 1)
# Create a categorical colorscale [a list of (reference point, color)]
# by mapping same reference points (excluding 1st and last bin edge)
# twice in a row (https://plotly.com/python/colorscales/#constructing-a-discrete-or-discontinuous-color-scale)
categorical_colorscale = []
for i in range(self.elements.size):
color = self.to_rgb255_string(self.cmap(colorscale_bins[i]))
categorical_colorscale.append((colorscale_bins[i], color))
categorical_colorscale.append((colorscale_bins[i + 1], color))
coloraxis_options = dict(
colorscale=categorical_colorscale,
showscale=True,
cmin=0,
cmax=self.elements.size,
colorbar=dict(
title="Elements",
tickvals=np.arange(0, self.elements.size) + 0.5,
ticktext=[
atomic_number2element_symbol(atomic_num)
for atomic_num in self.elements
],
# to change length and position of colorbar
len=0.75,
yanchor="top",
y=0.75,
),
)
# Plot an invisible one point scatter trace, to make colorbar show up
scatter_point_idx = pu.get_mid_point_idx(self.plot_wavelength)
self.fig.add_trace(
go.Scatter(
x=self.plot_wavelength[scatter_point_idx],
y=[0],
mode="markers",
showlegend=False,
hoverinfo="skip",
marker=dict(color=[0], opacity=0, **coloraxis_options),
)
)
``` |
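A minimal usage sketch for the plotter defined above (added for illustration only; the HDF path, wavelength range and output filename are placeholders, while `KromerPlotter.from_hdf` and `generate_plot_mpl` are the entry points defined in this file, with the module path inferred from its location):
```python
import astropy.units as u

from tardis.widgets.kromer_plot import KromerPlotter

# Build the plotter from a previously saved simulation HDF file (placeholder path)
plotter = KromerPlotter.from_hdf("/path/to/simulation.hdf")

# Static matplotlib plot of virtual packets, restricted to an arbitrary optical range
ax = plotter.generate_plot_mpl(
    packets_mode="virtual",
    packet_wvl_range=[3000, 9000] * u.AA,
)
ax.figure.savefig("kromer_plot.png")
```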
{
"source": "JigarJoshi/openapi-generator",
"score": 3
} |
#### File: tiny/cpp/pre_compiling_bourne.py
```python
Import("env")
## Compatibility for bourne to work on microcontrollers
# We insert '#define _GLIBCXX_USE_C99' in files that use std::stoll or std::to_string
def insert_c99_into(file):
import fileinput
path = env['PROJECT_LIBDEPS_DIR'] + "/" + env['PIOENV'] + "/bourne/src/bourne/" + file
value = '#define _GLIBCXX_USE_C99 1\n'
    for line in fileinput.FileInput(path, inplace=1):
        if line.startswith('#define _GLIBCXX_USE_C99'):
            continue
        elif line.startswith('// D'):
            line = line.replace(line, line + value)
        print(line, end='')
def fix_parser():
insert_c99_into('detail/parser.cpp')
def fix_json():
insert_c99_into('json.cpp')
fix_parser()
fix_json()
```
#### File: python-experimental/tests_manual/test_array_holding_any_type.py
```python
import sys
import unittest
from datetime import date, datetime, timezone
import petstore_api
from petstore_api.model.array_holding_any_type import ArrayHoldingAnyType
from petstore_api.schemas import NoneClass, BoolClass
class TestArrayHoldingAnyType(unittest.TestCase):
"""ArrayHoldingAnyType unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testArrayHoldingAnyType(self):
"""Test ArrayHoldingAnyType"""
enum_values = [True, False]
for enum_value in enum_values:
inst = ArrayHoldingAnyType([enum_value])
assert isinstance(inst, ArrayHoldingAnyType)
assert isinstance(inst, tuple)
assert isinstance(inst[0], BoolClass)
assert bool(inst[0]) is enum_value
inst = ArrayHoldingAnyType([None])
assert isinstance(inst, ArrayHoldingAnyType)
assert isinstance(inst, tuple)
assert isinstance(inst[0], NoneClass)
input_to_stored_value = [
(0, 0),
(3.14, 3.14),
(date(1970, 1, 1), '1970-01-01'),
(datetime(1970, 1, 1, 0, 0, 0), '1970-01-01T00:00:00'),
(datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc), '1970-01-01T00:00:00+00:00'),
([], ()),
({}, {}),
('hi', 'hi'),
]
for input, stored_value in input_to_stored_value:
inst = ArrayHoldingAnyType([input])
assert isinstance(inst, ArrayHoldingAnyType)
assert isinstance(inst, tuple)
assert inst[0] == stored_value
if __name__ == '__main__':
unittest.main()
```
#### File: python-experimental/tests_manual/test_date_time_with_validations.py
```python
import unittest
import petstore_api
from petstore_api.model.date_time_with_validations import DateTimeWithValidations
from datetime import date, datetime, timezone
class TestDateTimeWithValidations(unittest.TestCase):
"""DateTimeWithValidations unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDateTimeWithValidations(self):
"""Test DateTimeWithValidations"""
# works with datetime input
valid_values = [datetime(2020, 1, 1), '2020-01-01T00:00:00']
expected_datetime = '2020-01-01T00:00:00'
for valid_value in valid_values:
inst = DateTimeWithValidations(valid_value)
assert inst == expected_datetime
# when passing data in with _from_openapi_data one must use str
with self.assertRaisesRegex(
petstore_api.ApiTypeError,
r"Invalid type. Required value type is str and passed type was datetime at \['args\[0\]'\]"
):
DateTimeWithValidations._from_openapi_data(datetime(2020, 1, 1))
# when passing data _from_openapi_data we can use str
input_value_to_datetime = {
"2020-01-01T00:00:00": datetime(2020, 1, 1, tzinfo=None),
"2020-01-01T00:00:00Z": datetime(2020, 1, 1, tzinfo=timezone.utc),
"2020-01-01T00:00:00+00:00": datetime(2020, 1, 1, tzinfo=timezone.utc)
}
for input_value, expected_datetime in input_value_to_datetime.items():
inst = DateTimeWithValidations._from_openapi_data(input_value)
assert inst.as_datetime == expected_datetime
# value error is raised if an invalid string is passed in
with self.assertRaisesRegex(
petstore_api.ApiValueError,
r"Value does not conform to the required ISO-8601 datetime format. Invalid value 'abcd' for type datetime at \('args\[0\]',\)"
):
DateTimeWithValidations._from_openapi_data("abcd")
# value error is raised if a date is passed in
with self.assertRaisesRegex(
petstore_api.ApiValueError,
r"Value does not conform to the required ISO-8601 datetime format. Invalid value '2020-01-01' for type datetime at \('args\[0\]',\)"
):
DateTimeWithValidations(date(2020, 1, 1))
# pattern checking with string input
error_regex = r"Invalid value `2019-01-01T00:00:00Z`, must match regular expression `.+?` at \('args\[0\]',\)"
with self.assertRaisesRegex(
petstore_api.ApiValueError,
error_regex
):
DateTimeWithValidations._from_openapi_data("2019-01-01T00:00:00Z")
# pattern checking with date input
error_regex = r"Invalid value `2019-01-01T00:00:00`, must match regular expression `.+?` at \('args\[0\]',\)"
with self.assertRaisesRegex(
petstore_api.ApiValueError,
error_regex
):
DateTimeWithValidations(datetime(2019, 1, 1))
if __name__ == '__main__':
unittest.main()
```
#### File: python-experimental/tests_manual/test_drawing.py
```python
import sys
import unittest
import petstore_api
from petstore_api.schemas import NoneClass
from petstore_api.model import shape
from petstore_api.model import shape_or_null
from petstore_api.model.drawing import Drawing
class TestDrawing(unittest.TestCase):
"""Drawing unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def test_create_instances(self):
"""
Validate instance can be created
"""
inst = shape.Shape(
shapeType="Triangle",
triangleType="IsoscelesTriangle"
)
from petstore_api.model.isosceles_triangle import IsoscelesTriangle
assert isinstance(inst, IsoscelesTriangle)
def test_deserialize_oneof_reference(self):
"""
Validate the scenario when the type of a OAS property is 'oneOf', and the 'oneOf'
schema is specified as a reference ($ref), not an inline 'oneOf' schema.
"""
isosceles_triangle = shape.Shape(
shapeType="Triangle",
triangleType="IsoscelesTriangle"
)
from petstore_api.model.isosceles_triangle import IsoscelesTriangle
assert isinstance(isosceles_triangle, IsoscelesTriangle)
from petstore_api.model.equilateral_triangle import EquilateralTriangle
inst = Drawing(
mainShape=isosceles_triangle,
shapes=[
shape.Shape(
shapeType="Triangle",
triangleType="EquilateralTriangle"
),
shape.Shape(
shapeType="Triangle",
triangleType="IsoscelesTriangle"
),
shape.Shape(
shapeType="Triangle",
triangleType="EquilateralTriangle"
),
shape.Shape(
shapeType="Quadrilateral",
quadrilateralType="ComplexQuadrilateral"
)
],
)
assert isinstance(inst, Drawing)
assert isinstance(inst.mainShape, IsoscelesTriangle)
self.assertEqual(len(inst.shapes), 4)
from petstore_api.model.complex_quadrilateral import ComplexQuadrilateral
assert isinstance(inst.shapes[0], EquilateralTriangle)
assert isinstance(inst.shapes[1], IsoscelesTriangle)
assert isinstance(inst.shapes[2], EquilateralTriangle)
assert isinstance(inst.shapes[3], ComplexQuadrilateral)
# Validate we cannot assign the None value to mainShape because the 'null' type
# is not one of the allowed types in the 'Shape' schema.
err_msg = (r"Invalid inputs given to generate an instance of .+?Shape.+? "
r"None of the oneOf schemas matched the input data.")
with self.assertRaisesRegex(
petstore_api.ApiValueError,
err_msg
):
inst = Drawing(
# 'mainShape' has type 'Shape', which is a oneOf [triangle, quadrilateral]
# So the None value should not be allowed and an exception should be raised.
mainShape=None,
)
"""
we can't pass in an incorrect type for shapes
        'shapes' items have type 'Shape', which is a oneOf [Triangle, Quadrilateral]
        composed schema. We are not able to assign Triangle or Quadrilateral
to a shapes item because those instances do not include Shape validation
Shape could require additional validations that Triangle + Quadrilateral do not include
"""
from petstore_api.model.triangle import Triangle
err_msg = (r"Incorrect type passed in, required type was <class 'petstore_api.model.shape.Shape'> "
r"and passed type was <class 'petstore_api.schemas.DynamicSchema'> at "
r"\('args\[0\]', 'shapes', 0\)")
with self.assertRaisesRegex(
petstore_api.ApiTypeError,
err_msg
):
inst = Drawing(
mainShape=isosceles_triangle,
shapes=[
Triangle(
shapeType="Triangle",
triangleType="EquilateralTriangle"
)
]
)
def test_deserialize_oneof_reference_with_null_type(self):
"""
Validate the scenario when the type of a OAS property is 'oneOf', and the 'oneOf'
schema is specified as a reference ($ref), not an inline 'oneOf' schema.
Further, the 'oneOf' schema has a 'null' type child schema (as introduced in
OpenAPI 3.1).
"""
# Validate we can assign the None value to shape_or_null, because the 'null' type
# is one of the allowed types in the 'ShapeOrNull' schema.
inst = Drawing(
# 'shapeOrNull' has type 'ShapeOrNull', which is a oneOf [null, triangle, quadrilateral]
shapeOrNull=None,
)
assert isinstance(inst, Drawing)
self.assertFalse('mainShape' in inst)
self.assertTrue('shapeOrNull' in inst)
self.assertTrue(isinstance(inst.shapeOrNull, NoneClass))
def test_deserialize_oneof_reference_with_nullable_type(self):
"""
Validate the scenario when the type of a OAS property is 'oneOf', and the 'oneOf'
schema is specified as a reference ($ref), not an inline 'oneOf' schema.
Further, the 'oneOf' schema has the 'nullable' attribute (as introduced in
OpenAPI 3.0 and deprecated in 3.1).
"""
# Validate we can assign the None value to nullableShape, because the NullableShape
# has the 'nullable: true' attribute.
inst = Drawing(
# 'nullableShape' has type 'NullableShape', which is a oneOf [triangle, quadrilateral]
# and the 'nullable: true' attribute.
nullableShape=None,
)
assert isinstance(inst, Drawing)
self.assertFalse('mainShape' in inst)
self.assertTrue('nullableShape' in inst)
self.assertTrue(isinstance(inst.nullableShape, NoneClass))
if __name__ == '__main__':
unittest.main()
```
#### File: python-experimental/tests_manual/test_integer_enum_one_value.py
```python
import sys
import unittest
import petstore_api
from petstore_api.model.integer_enum_one_value import IntegerEnumOneValue
class TestIntegerEnumOneValue(unittest.TestCase):
"""IntegerEnumOneValue unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIntegerEnumOneValue(self):
"""Test IntegerEnumOneValue"""
with self.assertRaises(TypeError):
"""
a value must be passed in
We cannot auto assign values because that would break composition if
            received payloads included this with no inputs and we added the 0 value to the incoming payload
One is not allowed to mutate incoming payloads because then:
- order of composed schema ingestion matters
- one can have default value collisions
- the added data will make expected schemas not match payloads
"""
model = IntegerEnumOneValue()
model = IntegerEnumOneValue(0)
assert model == 0, "We can also pass in the value as a positional arg"
# one cannot pass the value with the value keyword
with self.assertRaises(TypeError):
model = IntegerEnumOneValue(value=0)
# one can pass in the enum value
model = IntegerEnumOneValue(IntegerEnumOneValue.POSITIVE_0)
if __name__ == '__main__':
unittest.main()
```
#### File: python-experimental/tests_manual/test_number_with_validations.py
```python
import sys
import unittest
import petstore_api
from petstore_api.model.number_with_validations import NumberWithValidations
class TestNumberWithValidations(unittest.TestCase):
"""NumberWithValidations unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testNumberWithValidations(self):
"""Test NumberWithValidations"""
valid_values = [10.0, 15.0, 20.0]
for valid_value in valid_values:
model = NumberWithValidations(valid_value)
assert model == valid_value
value_error_msg_pairs = (
(9.0, r"Invalid value `9.0`, must be a value greater than or equal to `10` at \('args\[0\]',\)"),
(21.0, r"Invalid value `21.0`, must be a value less than or equal to `20` at \('args\[0\]',\)"),
)
for invalid_value, error_msg in value_error_msg_pairs:
with self.assertRaisesRegex(petstore_api.ApiValueError, error_msg):
NumberWithValidations(invalid_value)
if __name__ == '__main__':
unittest.main()
```
#### File: python-experimental/tests_manual/test_object_with_validations.py
```python
import sys
import unittest
import petstore_api
from petstore_api.model.object_with_validations import ObjectWithValidations
class TestObjectWithValidations(unittest.TestCase):
"""ObjectWithValidations unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def test_ObjectWithValidations(self):
"""Test ObjectWithValidations"""
with self.assertRaisesRegex(
petstore_api.ApiValueError,
r"Invalid value `frozendict.frozendict\({}\)`, number of properties must be greater than or equal to `2` at \('args\[0\]',\)"
):
ObjectWithValidations({})
with self.assertRaisesRegex(
petstore_api.ApiValueError,
r"Invalid value `frozendict.frozendict\({'a': 'a'}\)`, number of properties must be greater than or equal to `2` at \('args\[0\]',\)"
):
# number of properties less than 2 fails
model = ObjectWithValidations(a='a')
# 2 or more properties succeeds
model = ObjectWithValidations(a='a', b='b')
model = ObjectWithValidations(a='a', b='b', c='c')
if __name__ == '__main__':
unittest.main()
```
#### File: python-legacy/test/test_format_test.py
```python
from __future__ import absolute_import
import unittest
import datetime
import petstore_api
from petstore_api.models.format_test import FormatTest # noqa: E501
from petstore_api.rest import ApiException
class TestFormatTest(unittest.TestCase):
"""FormatTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test FormatTest
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = petstore_api.models.format_test.FormatTest() # noqa: E501
if include_optional :
return FormatTest(
integer = 10,
int32 = 20,
int64 = 56,
number = 32.1,
float = 54.3,
double = 67.8,
decimal = 1,
string = 'a',
byte = 'YQ==',
binary = bytes(b'blah'),
date = datetime.datetime.strptime('1975-12-30', '%Y-%m-%d').date(),
date_time = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
uuid = '72f98069-206d-4f12-9f12-3d1e525a8e84',
password = '<PASSWORD>',
pattern_with_digits = '0480728880',
pattern_with_digits_and_delimiter = 'image_480'
)
else :
return FormatTest(
number = 32.1,
byte = 'YQ==',
date = datetime.datetime.strptime('1975-12-30', '%Y-%m-%d').date(),
password = '<PASSWORD>',
)
def testFormatTest(self):
"""Test FormatTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
```
#### File: python/tests_manual/test_composed_schema_with_props_and_no_add_props.py
```python
import sys
import unittest
import petstore_api
from petstore_api.model.tag import Tag
globals()['Tag'] = Tag
from petstore_api.model.composed_schema_with_props_and_no_add_props import ComposedSchemaWithPropsAndNoAddProps
class TestComposedSchemaWithPropsAndNoAddProps(unittest.TestCase):
"""ComposedSchemaWithPropsAndNoAddProps unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testComposedSchemaWithPropsAndNoAddProps(self):
"""Test ComposedSchemaWithPropsAndNoAddProps"""
inst = ComposedSchemaWithPropsAndNoAddProps(color='red')
# ComposedSchemaWithPropsAndNoAddProps should only allow in the color property
# once https://github.com/OpenAPITools/openapi-generator/pull/8816 lands
# this will no longer work
# TODO update the test then
inst = ComposedSchemaWithPropsAndNoAddProps(color='red', id=1, name='foo')
with self.assertRaises(petstore_api.ApiAttributeError):
inst = ComposedSchemaWithPropsAndNoAddProps(color='red', id=1, name='foo', additional=5)
if __name__ == '__main__':
unittest.main()
```
#### File: python/tests_manual/test_mole.py
```python
import sys
import unittest
from petstore_api.exceptions import ApiAttributeError
import petstore_api
try:
from petstore_api.model import mole
except ImportError:
mole = sys.modules["petstore_api.model.mole"]
class TestMole(unittest.TestCase):
"""Triangle unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMole(self):
# includes required parameters that are `readOnly=False` and not defined `readOnly`
my_mole = mole.Mole(smell="dirt", hearing=False)
assert my_mole.smell == "dirt"
assert my_mole.hearing is False
# includes required parameters that `readOnly=False`, an optional `readOnly=False` and not defined `readOnly`
my_mole = mole.Mole(smell="dirt", taste="kfc", hearing=True)
assert my_mole.smell == "dirt"
assert my_mole.taste == "kfc"
assert my_mole.hearing is True
# includes required parameters that `readOnly=False`, and required not defined `readOnly`, and an optional not defined
my_mole = mole.Mole(smell="dirt", seeing_ghosts=True, hearing=False)
assert my_mole.smell == "dirt"
assert my_mole.seeing_ghosts is True
assert my_mole.hearing is False
# passing in required readOnly parameters raises an exception
with self.assertRaises(ApiAttributeError):
mole.Mole(smell="dirt", hearing=False, blind=True)
# passing in optional readOnly parameters raises an exception
with self.assertRaises(ApiAttributeError):
mole.Mole(smell="dirt", hearing=False, touch=True)
        # passing in required and optional parameters with readOnly true or false works with _from_openapi_data
my_mole = mole.Mole._from_openapi_data(
smell="dirt",
taste="kfc",
blind=True,
touch=False,
hearing=True,
seeing_ghosts=False,
)
assert my_mole.smell == "dirt"
assert my_mole.taste == "kfc"
assert my_mole.blind is True
assert my_mole.touch is False
assert my_mole.hearing is True
assert my_mole.seeing_ghosts is False
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jigarmehta1999/selenium",
"score": 2
} |
#### File: py/private/suite.bzl
```python
load("@rules_python//python:defs.bzl", "py_library")
load("//py/private:pytest.bzl", "pytest_test")
def _is_test(file):
return file.startswith("test_") or file.endswith("_tests.py")
def py_test_suite(name, srcs, size = None, deps = None, python_version = None, imports = None, visibility = None, **kwargs):
library_name = "%s-test-lib" % name
py_library(
name = library_name,
testonly = True,
srcs = srcs,
deps = deps,
imports = imports,
)
tests = []
for src in srcs:
if _is_test(src):
test_name = "%s-%s" % (name, src)
tests.append(test_name)
pytest_test(
name = test_name,
size = size,
srcs = [src],
deps = [library_name],
python_version = python_version,
**kwargs,
)
native.test_suite(
name = name,
tests = tests,
visibility = visibility,
)
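# A minimal BUILD-file usage sketch for this macro (the target name, glob and deps
# below are hypothetical, not taken from the Selenium tree):
#
#   load("//py/private:suite.bzl", "py_test_suite")
#
#   py_test_suite(
#       name = "unit",
#       size = "small",
#       srcs = glob(["test_*.py"]),
#       deps = [":my_test_lib_deps"],
#   )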
``` |
{
"source": "jigarparekh279/Machine_Learning_Projects",
"score": 3
} |
#### File: Machine_Learning_Projects/01_Seoul_Bike_Trip_Duration_Prediction/04_Model_Deployment.py
```python
import numpy as np
import joblib
import os
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
import streamlit as st
########################################################################
# Capture the path on current folder on cloud
curr_path = os.path.dirname(os.path.realpath(__file__))
feat_cols = ['Distance', 'Haversine', 'Phour', 'Pmin', 'Dhour',
'Dmin', 'Temp', 'Humid', 'Solar', 'Dust']
scalar = joblib.load('models/scalar.joblib')
model = keras.models.load_model('models/ANN.h5')
def predict_duration(attributes: np.ndarray):
    # Return bike trip duration value
scaled_attributes = scalar.transform(attributes)
print(scaled_attributes)
pred = model.predict(scaled_attributes)
return int(pred[0,0])
########################################################################
st.set_page_config(page_title="Seoul Bike Trip Duration Prediction App",
page_icon="🛴", layout="wide")
with st.form("prediction_form"):
st.header("Enter the Deciding Factors:")
distance = st.number_input("Distance: ", value=8490, format="%d")
haversine = st.number_input("Haversine: ", value=3.400058)
phour = st.slider("Pickup Hour: ", 0, 23, value=19, format="%d")
pmin = st.slider("Pickup Minute: ", 0, 59, value=14, format="%d")
dhour = st.slider("Dropoff Hour: ", 0, 23, value=20, format="%d")
dmin = st.slider("Dropoff Minute: ", 0, 59, value=12, format="%d")
temp = st.number_input("Temp: ", value=8.8)
humid = st.number_input("Humid: ", value= 49.0)
solar = st.number_input("Solar: ", value=0.05)
dust = st.number_input("Dust: ", value=27.0)
submit_val = st.form_submit_button("Predict Duration")
if submit_val:
# If submit is pressed == True
attributes = np.array([distance, haversine, phour, pmin, dhour,
dmin, temp, humid, solar, dust]).reshape(1,-1)
if attributes.shape == (1,10):
print("Attributes are valid")
value = predict_duration(attributes=attributes)
st.header("Here are the results:")
st.success(f"The Duration predicted is {value} mins")
``` |
{
"source": "jigartarpara/greenshine_customization",
"score": 2
} |
#### File: customization/lead/lead.py
```python
import frappe
from frappe.model.mapper import get_mapped_doc
@frappe.whitelist()
def make_survey_form(source_name, target_doc=None):
target_doc = get_mapped_doc("Lead", source_name,
{"Lead": {
"doctype": "Survey Form",
"field_map": {
"name": "lead",
"lead_name": "company_name",
"company_name": "name_of_kitchen",
"email_id": "email",
"mobile_no": "mobile"
}
}}, target_doc)
return target_doc
```
#### File: doctype/survey_form/survey_form.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SurveyForm(Document):
def onload(self):
customer = frappe.get_value("Customer", {"lead_name" : self.lead},['name'])
self.customer = customer
```
#### File: greenshine_customization/greenshine_customization/utils.py
```python
import frappe
def onload(doc, method):
total = 0
total_month = 0
test = 0
for item in doc.items:
total += item.total_year
total_month += item.total_monthly
test += item.amount
doc.final_total = total
doc.final_total_month = total_month
doc.total = test
``` |
{
"source": "jigartarpara/realestate",
"score": 2
} |
#### File: doctype/realestate_partner/realestate_partner.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class RealEstatePartner(Document):
def validate(self):
if not self.shareholder:
self.create_shareholder()
def create_shareholder(self):
if frappe.db.get_value("Shareholder", self.partner_name, "name"):
shareholder = frappe.get_doc("Shareholder",self.partner_name)
else:
shareholder = frappe.get_doc({
"doctype": "Shareholder",
"title": self.partner_name
})
shareholder.save()
self.shareholder = shareholder.name
``` |
{
"source": "jigartarpara/warranty_management_system",
"score": 2
} |
#### File: doctype/service_order/service_order.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ServiceOrder(Document):
def on_submit(self):
if self.issue:
issue = frappe.get_doc("Issue",self.issue)
issue.status = "Warranty Claim Raised"
issue.save(ignore_permissions=True)
``` |
{
"source": "jigarWala/ccr",
"score": 2
} |
#### File: ccr/ccr/CodeCompileRun.py
```python
import logging
from .client import *
from .utility import supported_languages,supported_languages_extension
from multiprocessing import Pool
log = logging.getLogger("ccr.CodeCompileRun")
class CCR:
def __init__(self, source_file_path, input_file_path,lang=None):
self.lang = lang
try:
self.ext = source_file_path.split(".")[-1]
except Exception:
log.warning("unable to get file extension from file {}".format(source_file_path))
try:
self.source_code = self.read_file(source_file_path)
if(input_file_path):
self.input = self.read_file(input_file_path)
else:
self.input = None
except Exception as e:
            log.critical('Exception occurred at CCR init {}'.format(e))
raise e
id = None
found_id = False
if(lang):
try:
self.id = supported_languages[lang]
found_id = True
except KeyError:
raise Exception("{} is not a supported language\nccr --help".format(lang))
if(not found_id):
try:
self.id = supported_languages_extension[self.ext]
except KeyError:
raise Exception("{} is not a supported extension, use -l to specify language explicitly\nccr --help for more".format(self.ext))
self.clients = [CodechefClient(), GeeksForGeeksClient()]
def read_file(self, file):
content = ''
try:
with open(file) as f:
content = f.read()
except Exception:
raise IOError("Unable to read file {}".format(file))
return content
def execute(self):
try:
n = len(self.clients)
args = (self.source_code, self.input, self.id)
targets = map(lambda client : client.run,self.clients)
with Pool(processes=2) as pool:
rs = []
for target in targets:
rs.append(pool.apply_async(target, args= args))
clients_result_ids = set()
done = False
while len(clients_result_ids) < n and not done:
for i in range(len(rs)):
r = rs[i]
if r.ready():
ans = r.get()
if ans:
print(ans)
done = True
break
else:
clients_result_ids.add(i)
except Exception as e:
log.error("something went wrong in execute method : {}".format(e))
raise e
```
#### File: ccr/ccr/utility.py
```python
import requests
import pickle
from os import path
from pkg_resources import resource_filename
codechefs_languages_map = dict()
# leetcodes_supported_languages = ["cpp", "java", "python", "python3", "c", "csharp", "javascript", "ruby", "swift", "kotlin", "scala", "bash", "go"]
leetcodes_languages_map = dict()
# geeksforgeeks_supported_languages = ["Python", "Python3", "Cpp", "Cpp14", "Java", "Csharp", "C", "Php", "Scala", "Perl"]
geeksforgeeks_languages_map = dict()
supported_languages = dict()
supported_languages_extension = dict()
def set_codechefs_languages_mapping(id, lang_code):
"""
codechef uses lang_code for languages to identify which interpreter/compiler to use
"""
# for python language --lang not set then detection by extension(.py) for different python2,python3,pypy,pypy_3 Maps to pypy_3 (id = 48)
codechefs_languages_map[id] = lang_code
def set_geeksforgeeks_language_mapping(id, geekslang):
geeksforgeeks_languages_map[id] = geekslang
def set_leetcodes_language_mapping(id, leetslang):
leetcodes_languages_map[id] = leetslang
def load_supported_languages():
"""
    codechef has an extensive range of languages, so it is used as the base for all languages supported by ccr
"""
response = requests.get('https://www.codechef.com/api/ide/undefined/languages/all').json()
id = 1
for lang_code, payload in response['languages'].items():
lang = "_".join(payload['full_name'].lower().split())
# print(lang)
supported_languages[lang] = id
supported_languages_extension[payload['extension']] = id
geekslang = leetslang = None
# map codechef's supported languages to other OJ clients
if lang == 'c++14':
geekslang = 'Cpp14'
leetslang = 'cpp'
elif lang == 'java':
geekslang = 'Java'
leetslang = 'java'
elif lang == 'python' or lang =="pypy":
geekslang = "Python"
leetslang = "python"
elif lang == 'python3' or lang == 'pypy_3':
geekslang = "Python3"
leetslang = "python3"
elif lang == 'c':
geekslang = 'C'
leetslang = 'c'
elif lang == 'c#':
geekslang = 'Csharp'
leetslang = 'csharp'
elif lang == 'scala':
geekslang = 'Scala'
leetslang = 'scala'
elif lang == 'php':
geekslang = 'Php'
elif lang == 'perl':
geekslang = 'Perl'
elif lang == 'go':
leetslang = 'go'
elif lang == 'swift':
leetslang = 'swift'
elif lang == 'ruby':
leetslang = 'ruby'
elif lang == 'kotlin':
leetslang = 'kotlin'
elif lang == 'bash':
leetslang = 'bash'
if geekslang:
set_geeksforgeeks_language_mapping(id, geekslang)
if leetslang:
set_leetcodes_language_mapping(id, leetslang)
set_codechefs_languages_mapping(id, payload['id'])
id += 1
def dump_pickle(path,data):
try:
with open(path,"wb") as f:
pickle.dump(data,f)
except Exception as e:
raise e
def load_pickle(path):
try:
with open(path,"rb") as f:
data = pickle.load(f)
return data
except Exception as e:
raise e
# delete pickle if new language added in codechef api
try:
    # during development use this
# sl_path = path.join(path.dirname(path.realpath(__file__)),"pickle/supported_languages.pickle")
# sle_path = path.join(path.dirname(path.realpath(__file__)),"pickle/supported_languages_extension.pickle")
# clm_path = path.join(path.dirname(path.realpath(__file__)),"pickle/codechefs_languages_map.pickle")
# glm_path = path.join(path.dirname(path.realpath(__file__)),"pickle/geeksforgeeks_languages_map.pickle")
#for packaging use below
sl_path = resource_filename("ccr","pickle/supported_languages.pickle")
sle_path = resource_filename("ccr","pickle/supported_languages_extension.pickle")
clm_path = resource_filename("ccr","pickle/codechefs_languages_map.pickle")
glm_path = resource_filename("ccr","pickle/geeksforgeeks_languages_map.pickle")
supported_languages = load_pickle(sl_path)
supported_languages_extension = load_pickle(sle_path)
codechefs_languages_map = load_pickle(clm_path)
geeksforgeeks_languages_map = load_pickle(glm_path)
except Exception:
load_supported_languages()
try:
dump_pickle(sl_path,supported_languages)
dump_pickle(sle_path,supported_languages_extension)
dump_pickle(clm_path,codechefs_languages_map)
dump_pickle(glm_path,geeksforgeeks_languages_map)
except Exception:
print("unable to dump pickle data..")
def supported_language_cli_output_maker():
"""
get the output and cut paste in cli.py
"""
count = 0
for i in supported_languages:
print("- "+i, end="\t")
count+=1
if(count == 3):
print()
count = 0
# print(supported_languages)
# print(supported_languages_extension)
# print(codechefs_languages_map)
# print(geeksforgeeks_languages_map)
``` |
{
"source": "jigarWala/Hackerrank",
"score": 3
} |
#### File: __algorithms/warmup/a-very-big-sum.py
```python
def list_ip():
return list(map(int,input().strip().split(' ')))
input()
print(sum(list_ip()))
``` |
{
"source": "jigarWala/tinyurl",
"score": 3
} |
#### File: jigarWala/tinyurl/shortner.py
```python
import sqlite3
import os
PATH = "C:\\Users\\jigar\\Desktop\\shortner\\tinyurls.db"
def create_db():
con = sqlite3.connect(PATH)
query = """
create table if not exists mappings(id integer PRIMARY KEY AUTOINCREMENT,
url varchar, shorturl varchar)
"""
con.execute(query)
con.commit()
con.close()
def insert_entry(url,shorturl):
con = sqlite3.connect(PATH)
query = """
insert into mappings(url,shorturl) values(?,?)
"""
con.execute(query,(url,shorturl))
con.commit()
con.close()
def cleanup():
con = sqlite3.connect(PATH)
query = """
delete from mappings;"""
con.execute(query)
query = "delete from sqlite_sequence where name='mappings';"
con.execute(query)
con.commit()
con.close()
charmaps = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
n = len(charmaps)
def shorten(url):
con = sqlite3.connect(PATH)
cur = con.cursor()
cur.execute("select count(*) from mappings")
id = cur.fetchone()[0]
if id is None:
id = 1
else:
id += 1
shorturl = ""
while id>0:
# only seven characters so ignoring performance loss due to immutability
shorturl += charmaps[id%n]
id//=n;
shorturl = shorturl[::-1]
shorturl = f"jig.lu/{shorturl}"
insert_entry(url,shorturl)
return shorturl
def elate(shorturl):
# jig.lu/GHhh90
shorturl = shorturl[7:]
id = 0
for i in shorturl:
ascii = ord(i)
if ascii >= ord('A') and ascii <= ord('Z'):
idx = ascii - ord('A')
elif ascii >= ord('a') and ascii <= ord('z'):
idx = ascii - ord('a') + 26
elif ascii >= ord('0') and ascii <= ord('9'):
idx = ascii - ord('0') +26 + 26
else :
            raise ValueError('invalid character in short url')
id = id*n + idx
# print(id)
con = sqlite3.connect(PATH)
cur = con.cursor()
cur.execute("select * from mappings where id = ?",(id,))
url = cur.fetchone()
    con.close()
    if url is None:
        print(f"no mapping found for {shorturl} (id {id})")
        return None
    return url[1]
if not os.path.exists(PATH):
create_db()
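# A minimal usage sketch (hypothetical URL) of the base-62 round trip implemented
# by shorten()/elate():
#   short = shorten("https://example.com/some/long/path")   # e.g. "jig.lu/B" for the first stored URL
#   assert elate(short) == "https://example.com/some/long/path"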
``` |
{
"source": "jigglepuff/StlOpenDataEtl",
"score": 3
} |
#### File: StlOpenDataEtl/etl/fetcher_response.py
```python
class FetcherResponse:
name = ''
source = ''
payload = None
error = None
def __init__(self, name, payload, source, error):
self.name = name
self.payload = payload
self.source = source
self.error = error
def __repr__(self):
return str(self.to_dict())
def to_dict(self):
return {
'name': self.name,
'payload': self.payload,
'source': self.source,
'error': self.error
}
``` |
{
"source": "jigglequack/Riddle-Me-This",
"score": 3
} |
#### File: jigglequack/Riddle-Me-This/generateRiddles.py
```python
from bs4 import BeautifulSoup
import requests
import difflib
def getRiddle() -> dict:
'''Returns a dict of a riddle and its answer if the answer is
two words or less'''
page_link = 'https://riddles.fyi/random-riddles/'
page_response = requests.get(page_link, timeout=5)
page_content = BeautifulSoup(page_response.content, "html.parser")
riddle_find = page_content.find("a", class_ = "query-title-link")
riddle = str(riddle_find.text)
answer_find = page_content.find("div", class_ = "su-spoiler-content su-clearfix")
answer = str(answer_find.text)
if len(answer.split()) > 3:
return getRiddle()
else:
riddle_dict = {"Riddle": riddle, "Answer": answer}
return riddle_dict
#def test_outputs() -> str:
# x = getRiddle()
# for r in x.values():
# print(r)
print(getRiddle())
def allRiddles(ridict: dict, rounds: int) -> dict:
    '''Returns a dict of distinct riddles (no duplicates),
    with one riddle for each of the requested rounds'''
riddle = getRiddle()
if len(ridict) == 0:
ridict[riddle["Riddle"]] = riddle["Answer"]
if len(ridict) == rounds:
return ridict
else:
for key,value in riddle.items():
if value in ridict.values():
return allRiddles(ridict,rounds)
else:
ridict[riddle["Riddle"]] = riddle["Answer"]
return allRiddles(ridict,rounds)
#def test_outputs2() -> str:
# x = allRiddles(dict(),100)
# for i in x.values():
# print(i)
def correctAnswer(correct_input: str, user_input: str):
'''Returns true if a user's input is correct'''
user_list = [user_input]
correct_input = correct_input.lower()
correct = difflib.get_close_matches(correct_input, user_list,1,0.8)
if len(correct) > 0:
#print("True")
return True
else:
#print("False")
return False
#correctAnswer("TO GET to the OTHER side", "togettotheotherside")
``` |
{
"source": "jiggls/studyingpython",
"score": 4
} |
#### File: jiggls/studyingpython/war.py
```python
import random
import time
import math
class Timer(object):
counter = 0
class Fighter(object): # parent class
counter = 0
hp = 100
maxhp = 100
name = ''
ap = 1
armor = 10
def __init__(self, hp, name, ap=1, armor=10, count=None):
self.hp = hp
self.maxhp = hp
self.name = name
self.ap = ap
self.armor = armor
self.count = count
def attack(self, damage):
damage = damage - ((damage * self.armor) // 100)
self.hp = self.hp - damage
def get_ap(self):
return self.ap
def __repr__(self):
# print(self.__class__.mro())
return self.get_name() + ': ' + str(self.get_hp())
def is_fullhp(self):
        # if self.hp == self.maxhp:  # could be written this way
        #     return True
        # else:
        #     return False
        return self.hp == self.maxhp  # or simply like this
class Getters():
def get_hp(self):
return self.hp
def get_armor(self):
return self.armor
def get_name(self):
return self.name
class Nameforpriest():
def __repr__(self):
return 'HEALER ' + self.get_name() + ': ' + str(self.get_hp())
class Warrior(Fighter, Getters):
def get_ap(self):
return int(self.ap * random.random())
class Champion(Fighter, Getters):
def get_ap(self):
if random.random() > 0.25:
return self.ap
else:
            print(self.name, ' champion lands a critical attack')
return self.ap * 2
class Captain(Fighter, Getters):
def attack(self, damage):
        a = input(f'what should {self.name} do? enter 1 to shield for 10 turns, incoming damage: {damage}')
if a == '1':
self.counter = self.count.counter
return
if damage > 30:
super().attack(damage)
def get_ap(self):
if self.count.counter <= self.counter + 10:
return 0
else:
return super().get_ap()
class Healer(Nameforpriest, Fighter, Getters):
mana = 100
maxmana = 100
def heal(self, items):
currentcounter = self.count.counter
sec = currentcounter - self.counter
manareg = sec * 2
self.mana += manareg
if self.mana > self.maxmana:
self.mana = self.maxmana
target = items[0]
for i in items:
if i.get_hp() < target.get_hp() and not target.is_fullhp():
target = i
print('healer status ', target.is_fullhp(), self.mana)
if not target.is_fullhp() and self.mana >= 20:
hp = target.get_hp() + 30
self.mana -= 20
if hp > target.maxhp:
hp = target.maxhp
target.hp = hp
print('HEALER ', self.name, ' target: ', target.name, ' new hp: ', target.hp)
self.counter = self.count.counter
count = Timer()
orcs = []
humans = []
for i in range(15):
orc = Warrior(120, 'orc barbarian' + str(i), ap=50, armor=0, count=count)
orcs.append(orc)
orcchampion = Champion(160, "orgrim champion" + str(i), ap=80, armor=0, count=count)
orcs.append(orcchampion)
orcchampion = Champion(160, "zak-zak champion" + str(i), ap=80, armor=0, count=count)
orcs.append(orcchampion)
orcchampion = Champion(160, "samuro champion" + str(i), ap=85, armor=0, count=count)
orcs.append(orcchampion)
orchero = Captain(480, "rexxar hero" + str(i), ap=200, armor=10, count=count)
orcs.append(orchero)
orchealer = Healer(100, "druid healer", ap=0, armor=50, count=count)
orcs.append(orchealer)
orchealer = Healer(100, "druid healer", ap=0, armor=50, count=count)
orcs.append(orchealer)
for i in range(10):
human = Warrior(100, 'human soldier' + str(i), ap=40, armor=20, count=count)
humans.append(human)
humanchampion = Champion(160, "varian champion" + str(i), ap=75, armor=30, count=count)
humans.append(humanchampion)
humanchampion = Champion(160, "anduin champion" + str(i), ap=75, armor=25, count=count)
humans.append(humanchampion)
humanchampion = Champion(160, "sedogriv champion" + str(i), ap=90, armor=0, count=count)
humans.append(humanchampion)
humanhero = Captain(400, "lotar hero" + str(i), ap=160, armor=40, count=count)
humans.append(humanhero)
humanhealer = Healer(100, "priest healer", ap=0, armor=50, count=count)
humans.append(humanhealer)
humanhealer = Healer(100, "priest healer", ap=0, armor=50, count=count)
humans.append(humanhealer)
print(orcs)
print(humans)
while orcs and humans:
count.counter += 1
time.sleep(1)
if random.random() > 0.5:
# unit2.attack(20)
# print('unit1 attacks unit2')
# print('unit2 have', unit2.get_hp())
attackers = orcs
victims = humans
else:
# unit1.attack(20)
# print('unit2 attacks unit1')
# print('unit1 have', unit1.get_hp())
attackers = humans
victims = orcs
attacker = random.choice(attackers)
victim = random.choice(victims)
if type(attacker) is Healer:
attacker.heal(attackers)
else:
ap = attacker.get_ap()
victim.attack(ap)
print('unit ', attacker.get_name(), ' attacks ', victim.get_name(), ' via ap ', ap, ' victim hp = ',
victim.get_hp())
if victim.get_hp() <= 0:
        print(victim.get_name(), ' has died')
victims.remove(victim)
print(orcs)
print(humans)
# if unit1.get_hp() > 0:
# print('unit1 win,he have ',unit1.get_hp())
# else:
# print ('unit2 win,he have ',unit2.get_hp())
``` |
{
"source": "jiggylepcha/pronto",
"score": 3
} |
#### File: pronto/parsers/obo.py
```python
import os
import fastobo
from .base import BaseParser
from ._fastobo import FastoboParser
class OboParser(FastoboParser, BaseParser):
@classmethod
def can_parse(cls, path, buffer):
return buffer.lstrip().startswith((b"format-version:", b"[Term", b"[Typedef"))
def parse_from(self, handle):
# Load the OBO document through an iterator using fastobo
doc = fastobo.iter(handle)
# Extract metadata from the OBO header and resolve imports
self.ont.metadata = self.extract_metadata(doc.header())
self.ont.imports.update(
self.process_imports(
self.ont.metadata.imports,
self.ont.import_depth,
os.path.dirname(self.ont.path or str()),
self.ont.timeout,
)
)
# Extract frames from the current document.
try:
for frame in doc:
if isinstance(frame, fastobo.term.TermFrame):
self.enrich_term(frame)
elif isinstance(frame, fastobo.typedef.TypedefFrame):
self.enrich_relationship(frame)
except SyntaxError as s:
location = self.ont.path, s.lineno, s.offset, s.text
raise SyntaxError(s.args[0], location) from None
```
#### File: pronto/pronto/pv.py
```python
import functools
import fastobo
from .utils.meta import roundrepr, typechecked
class PropertyValue(object):
"""A property-value, which adds annotations to an entity.
"""
property: str
__slots__ = ("__weakref__", "property")
@roundrepr
@functools.total_ordering
class LiteralPropertyValue(PropertyValue):
"""A property-value which adds a literal annotation to an entity.
"""
literal: str
datatype: str
__slots__ = ("literal", "datatype")
@typechecked()
def __init__(self, property: str, literal: str, datatype: str = "xsd:string"):
"""Create a new `LiteralPropertyValue` instance.
Arguments:
property (str): The annotation property, as an OBO identifier.
literal (str): The serialized value of the annotation.
datatype (str): The datatype of the annotation property value.
Defaults to `xsd:string`.
"""
self.property = str(fastobo.id.parse(property))
self.literal = literal
self.datatype = str(fastobo.id.parse(datatype))
def __eq__(self, other: object) -> bool:
if isinstance(other, LiteralPropertyValue):
return (
self.property == other.property
and self.literal == other.literal
and self.datatype == other.datatype
)
return False
def __lt__(self, other: object) -> bool:
if isinstance(other, LiteralPropertyValue):
return (self.property, self.literal, self.datatype) < (
other.property,
other.literal,
other.datatype,
)
elif isinstance(other, ResourcePropertyValue):
return self.property < other.property
else:
return NotImplemented
def __hash__(self) -> int:
return hash((LiteralPropertyValue, self.property, self.literal, self.datatype))
@roundrepr
@functools.total_ordering
class ResourcePropertyValue(PropertyValue):
"""A property-value which adds a resource annotation to an entity.
"""
resource: str
__slots__ = ("resource",)
@typechecked()
def __init__(self, property: str, resource: str):
"""Create a new `ResourcePropertyValue` instance.
Arguments:
property (str): The annotation property, as an OBO identifier.
resource (str): The annotation entity value, as an OBO identifier.
"""
self.property = str(fastobo.id.parse(property))
self.resource = str(fastobo.id.parse(resource))
def __eq__(self, other: object) -> bool:
if isinstance(other, ResourcePropertyValue):
return (self.property, self.resource) == (other.property, other.resource)
return False
def __lt__(self, other: object) -> bool:
if isinstance(other, ResourcePropertyValue):
return (self.property, self.resource) < (other.property, other.resource)
elif isinstance(other, LiteralPropertyValue):
return self.property < other.property
else:
return NotImplemented
def __hash__(self) -> int:
        return hash((ResourcePropertyValue, self.property, self.resource))
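# A minimal usage sketch (hypothetical OBO identifiers, not taken from a real ontology):
#   definition = LiteralPropertyValue("IAO:0000115", "a textual definition", "xsd:string")
#   creator = ResourcePropertyValue("dc:creator", "ORCID:0000-0001-2345-6789")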
```
#### File: pronto/utils/iter.py
```python
from collections.abc import Sized
from typing import TypeVar, Generic, Iterator
S = TypeVar("S")
T = TypeVar("T")
class SizedIterator(Generic[T], Iterator[T], Sized):
"""A wrapper for iterators which lengths is known in advance.
"""
def __init__(self, it: Iterator[T], length: int):
self._it = it
self._length = length
def __len__(self) -> int:
return self._length
def __length_hint__(self) -> int:
return self._length
def __iter__(self: S) -> S:
return self
def __next__(self) -> T:
val = next(self._it)
self._length -= 1
return val
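# A minimal usage sketch:
#   it = SizedIterator(iter(range(3)), 3)
#   len(it)   # -> 3
#   next(it)  # -> 0, after which len(it) == 2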
```
#### File: pronto/tests/test_ontology.py
```python
import os
import sys
import pickle
import unittest
import warnings
import pronto
from pronto.logic.lineage import Lineage
from .utils import DATADIR
class TestOntology(unittest.TestCase):
@classmethod
def setUpClass(cls):
warnings.simplefilter('error')
warnings.simplefilter('ignore', category=UnicodeWarning)
cls.file = open(os.path.join(DATADIR, "ms.obo"), "rb")
cls.ms = pronto.Ontology(cls.file)
@classmethod
def tearDownClass(cls):
cls.file.close()
warnings.simplefilter(warnings.defaultaction)
def test_inheritance_caching(self):
ont = pronto.Ontology()
self.assertEqual(ont._inheritance, {})
t1 = ont.create_term("TST:001")
self.assertEqual(ont._inheritance, {t1.id: Lineage()})
t2 = ont.create_term("TST:002")
self.assertEqual(ont._inheritance, {t1.id: Lineage(), t2.id: Lineage()})
t2.relationships = { ont["is_a"]: [t1] }
self.assertEqual(ont._inheritance, {
t1.id: Lineage(sub={t2.id}),
t2.id: Lineage(sup={t1.id})
})
t2.relationships = {}
self.assertEqual(ont._inheritance, {t1.id: Lineage(), t2.id: Lineage()})
class TestPickling(unittest.TestCase):
@classmethod
def setUpClass(cls):
warnings.simplefilter('error')
warnings.simplefilter('ignore', category=UnicodeWarning)
cls.file = open(os.path.join(DATADIR, "ms.obo"), "rb")
cls.ms = pronto.Ontology(cls.file)
@classmethod
def tearDownClass(cls):
cls.file.close()
warnings.simplefilter(warnings.defaultaction)
# ------------------------------------------------------------------------
def _test_memory_pickling(self, protocol):
ont = pronto.Ontology()
t1 = ont.create_term("TST:001")
t1.name = "<NAME>"
pickled = pickle.dumps(ont, protocol=protocol)
unpickled = pickle.loads(pickled)
self.assertEqual(ont.keys(), unpickled.keys())
self.assertEqual(ont["TST:001"], unpickled["TST:001"])
self.assertEqual(ont["TST:001"].name, unpickled["TST:001"].name)
def test_memory_pickling_3(self):
self._test_memory_pickling(3)
def test_memory_pickling_4(self):
self._test_memory_pickling(4)
@unittest.skipIf(sys.version_info < (3, 8), "protocol 5 requires Python 3.8+")
def test_memory_pickling_5(self):
self._test_memory_pickling(5)
# ------------------------------------------------------------------------
def _test_file_pickling(self, protocol):
pickled = pickle.dumps(self.ms, protocol=protocol)
unpickled = pickle.loads(pickled)
self.assertEqual(self.ms.keys(), unpickled.keys())
for key in self.ms.keys():
term_ms, term_pickled = self.ms[key]._data(), unpickled[key]._data()
for attr in term_ms.__annotations__:
attr_ms = getattr(term_ms, attr)
attr_pickled = getattr(term_pickled, attr)
self.assertEqual(attr_ms, attr_pickled)
def test_file_pickling_3(self):
self._test_file_pickling(3)
def test_file_pickling_4(self):
self._test_file_pickling(4)
@unittest.skipIf(sys.version_info < (3, 8), "protocol 5 requires Python 3.8+")
def test_file_pickling_5(self):
self._test_file_pickling(5)
``` |
{
"source": "jigi-33/wemake-python-styleguide",
"score": 3
} |
#### File: test_statements/test_parameters_indentation/test_function_indentation.py
```python
import pytest
from wemake_python_styleguide.compat.constants import PY38
from wemake_python_styleguide.visitors.ast.statements import (
ParametersIndentationViolation,
WrongParametersIndentationVisitor,
)
# Correct:
correct_single_line_function = 'def test(arg, *args, kw, **kwargs): ...'
correct_multi_line_function = """
def test(
arg,
*args,
kw,
**kwargs,
): ...
"""
correct_multi_line_function_with_posonly = """
def test(
arg1,
/,
arg2,
*args,
kw,
**kwargs,
): ...
"""
correct_multi_line_function_with_defaults = """
def test(
arg1,
arg2=True,
*args,
kw1,
kw2=True,
**kwargs,
): ...
"""
correct_next_line_function = """
def test(
arg, *args, kw, **kwargs,
): ...
"""
# Wrong:
wrong_function_indentation1 = """
def test(arg,
*args, kw, **kwargs): ...
"""
wrong_function_indentation2 = """
def test(arg, *args,
kw, **kwargs): ...
"""
wrong_function_indentation3 = """
def test(arg, *args, kw,
**kwargs): ...
"""
wrong_function_indentation4 = """
def test(
arg, *args,
kw, **kwargs,
): ...
"""
wrong_function_indentation5 = """
def test(
arg,
*args,
kw, **kwargs,
): ...
"""
wrong_function_indentation6 = """
def test(
arg, *args,
kw,
**kwargs,
): ...
"""
wrong_function_indentation7 = """
def test(
arg, *args, kw,
**kwargs,
): ...
"""
wrong_function_indentation8 = """
def test(
arg1, /,
arg2, *args, kw, **kwargs,
): ...
"""
@pytest.mark.parametrize('code', [
correct_single_line_function,
correct_multi_line_function,
correct_multi_line_function_with_defaults,
correct_next_line_function,
pytest.param(
correct_multi_line_function_with_posonly,
marks=pytest.mark.skipif(not PY38, reason='posonly appeared in 3.8'),
),
])
def test_correct_function_indentation(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that correctly indented functions work."""
tree = parse_ast_tree(code)
visitor = WrongParametersIndentationVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
wrong_function_indentation1,
wrong_function_indentation2,
wrong_function_indentation3,
wrong_function_indentation4,
wrong_function_indentation5,
wrong_function_indentation6,
wrong_function_indentation7,
pytest.param(
wrong_function_indentation8,
marks=pytest.mark.skipif(not PY38, reason='posonly appeared in 3.8'),
),
])
def test_wrong_function_indentation(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that poorly indented functions do not work."""
tree = parse_ast_tree(code)
visitor = WrongParametersIndentationVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ParametersIndentationViolation])
``` |
{
"source": "jigiciak/noise_reduction",
"score": 3
} |
#### File: jigiciak/noise_reduction/create_dataset.py
```python
import os
import numpy as np
from signal_utils import save_audio
from signal_utils import audio_files_to_numpy
from signal_utils import blend_voice_with_noise
from signal_utils import numpy_audio_to_matrix_spectrogram
import argparse
def make_dir(path):
try:
os.makedirs(path)
except OSError:
print("Creation of the directory %s failed" % path)
else:
print("Successfully created the directory %s " % path)
def prepare_folders():
make_dir('./data/train/timeseries/')
make_dir('./data/train/combined_sound/')
make_dir('./data/train/spectogram/')
make_dir('./data/test/timeseries/')
make_dir('./data/test/combined_sound/')
make_dir('./data/test/spectogram/')
def create_data_from_folder(data_type, nb_samples=1000):
clean_voice_path = f"./data/{data_type}/clean_voice/"
noise_path = f"./data/{data_type}/noise/"
output_timeseries_path = f"./data/{data_type}/timeseries/"
output_sound_path = f"./data/{data_type}/combined_sound/"
output_spectogram = f"./data/{data_type}/spectogram/"
create_data(clean_voice_path, noise_path, output_timeseries_path, output_sound_path, output_spectogram, nb_samples=nb_samples)
def create_data(clean_voice_path, noise_path, output_timeseries_path, output_sound_path, output_spectogram_path,
sample_rate=8000,
min_duration=1.0, frame_length=8064, jump_length_frame=8064, jump_length_frame_noise=5000,
nb_samples=1000,
n_fft=255, jump_length_fft=63):
list_noise_files = os.listdir(noise_path)
list_voice_files = os.listdir(clean_voice_path)
# Convert signals to numpy arrays
noise = audio_files_to_numpy(noise_path, list_noise_files, sample_rate,
frame_length, jump_length_frame_noise, min_duration)
voice = audio_files_to_numpy(clean_voice_path, list_voice_files,
sample_rate, frame_length, jump_length_frame, min_duration)
# Blending clean voice with noises (data augmentation)
prod_voice, prod_noise, prod_noisy_voice = blend_voice_with_noise(
voice, noise, nb_samples, frame_length)
# Save generated sounds
noisy_voice_long = prod_noisy_voice.reshape(1, nb_samples * frame_length)
save_audio(output_sound_path + 'noisy_voice_long.wav', noisy_voice_long[0, :], sample_rate)
voice_long = prod_voice.reshape(1, nb_samples * frame_length)
save_audio(output_sound_path + 'voice_long.wav', voice_long[0, :], sample_rate)
noise_long = prod_noise.reshape(1, nb_samples * frame_length)
save_audio(output_sound_path + 'noise_long.wav', noise_long[0, :], sample_rate)
# Spectrogram dimensions
spectrogram_dimension = int(n_fft / 2) + 1
# Create spectrograms
m_amp_db_voice, m_pha_voice = numpy_audio_to_matrix_spectrogram(
prod_voice, spectrogram_dimension, n_fft, jump_length_fft)
m_amp_db_noise, m_pha_noise = numpy_audio_to_matrix_spectrogram(
prod_noise, spectrogram_dimension, n_fft, jump_length_fft)
m_amp_db_noisy_voice, m_pha_noisy_voice = numpy_audio_to_matrix_spectrogram(
prod_noisy_voice, spectrogram_dimension, n_fft, jump_length_fft)
# Save to disk for training/testing
np.save(output_timeseries_path + 'voice_timeserie', prod_voice)
np.save(output_timeseries_path + 'noise_timeserie', prod_noise)
np.save(output_timeseries_path + 'noisy_voice_timeserie', prod_noisy_voice)
np.save(output_spectogram_path + 'voice_amp_db', m_amp_db_voice)
np.save(output_spectogram_path + 'noise_amp_db', m_amp_db_noise)
np.save(output_spectogram_path + 'noisy_voice_amp_db', m_amp_db_noisy_voice)
np.save(output_spectogram_path + 'voice_pha_db', m_pha_voice)
np.save(output_spectogram_path + 'noise_pha_db', m_pha_noise)
np.save(output_spectogram_path + 'noisy_voice_pha_db', m_pha_noisy_voice)
if __name__ == '__main__':
prepare_folders()
parser = argparse.ArgumentParser(description='Prepare test or train dataset')
parser.add_argument('--test', action='store_true')
parser.add_argument('--train', action='store_true')
parser.add_argument('--nb_samples', type=int, default=1000)
args = parser.parse_args()
test_flag = args.test
train_flag = args.train
nb_samples = args.nb_samples
if test_flag:
print("Creating test dataset")
create_data_from_folder("test", nb_samples=nb_samples)
if train_flag:
print("Creating train dataset")
create_data_from_folder("train", nb_samples=nb_samples)
```
#### File: jigiciak/noise_reduction/test_prediction.py
```python
from prediction_denoise import prediction
def predict(name_model='model_unet', audio_input_prediction='noisy_voice_bells28.wav',
audio_output_prediction='val_voice_bells28.wav', sr=8000):
# Example: python main.py --mode="prediction"
# path to find pre-trained weights / save models
weights_path = './data/weights'
# pre trained model
name_model = name_model
# directory where read noisy sound to denoise
audio_dir_prediction = './data/validation/noisy_voice'
# directory to save the denoise sound
dir_save_prediction = './data/validation/save_predictions/'
# Name noisy sound file to denoise
audio_input_prediction = [audio_input_prediction]
# Name of denoised sound file to save
audio_output_prediction = audio_output_prediction
# Sample rate to read audio
sample_rate = sr
# Minimum duration of audio files to consider
min_duration = 1.0
# Frame length for training data
frame_length = 8064
# hop length for sound files
hop_length_frame = 8064
# nb of points for fft(for spectrogram computation)
n_fft = 255
# hop length for fft
# hop_length_fft = 63
hop_length_fft = 63
prediction(weights_path, name_model, audio_dir_prediction, dir_save_prediction, audio_input_prediction,
audio_output_prediction, sample_rate, min_duration, frame_length, hop_length_frame, n_fft,
hop_length_fft)
if __name__ == '__main__':
predict()
``` |
{
"source": "jigillespie/fonttools-utils",
"score": 3
} |
#### File: fonttools-utils/mac-os-x-system-font-replacer/MacOSXSystemFontReplacer.py
```python
VERSION = "1.1"
"""MacOSXSystemFontReplacer.py
Version %(version)s
Copyright (c) 2015 by <NAME> <<EMAIL>>
Licensed under the Apache 2 license.
""" % {"version": VERSION}
import argparse
import os
import os.path
import fontTools.ttLib
import subprocess
parser = argparse.ArgumentParser()
parser.description = """With %(prog)s v""" + VERSION + """,
you can replace your system UI fonts of Mac OS X 10.10 Yosemite with any fonts
of your choice. You can supply any fonts to the tool as long as they have filenames in a format that
the tool expects. Please run %(prog)s -H for the exact file names you should provide. The tool is 'safe' i.e.
it does not modify the original system UI fonts. Instead, it writes patched versions of the fonts you
provide into the System library folder, but you can easily uninstall those later manually or using the tool itself.
"""
parser.add_argument("-i", "--input-folder",
help="path to folder with your input font files, default is %(default)s",
default=os.getcwd()
)
parser.add_argument("-s", "--font-size",
help="scale the relative font size in %%, default is %(default)s, use a higher value such as 105 to optically increase your patched UI fonts",
type=int,
default=100
)
parser.add_argument("-o", "--output-folder",
help="custom path to folder where your patched fonts will be installed, default is %(default)s and you should run the tool as: sudo %(prog)s",
default="/Library/Fonts/"
)
parser.add_argument("-H", "--help-more",
help="print the file names that the tool expects to find in the -i INPUT_FOLDER",
action="store_true",
default=False
)
parser.add_argument("-u", "--uninstall",
help="uninstall previously patched fonts from system folder or the -o OUTPUT_FOLDER",
action="store_true",
default=False
)
parser.add_argument("-c", "--no-reset-caches",
help="do not reset OS X font caches (useful if you are testing and using a custom -o OUTPUT_FOLDER)",
action="store_true",
                    default=False
)
parser.add_argument("-t", "--system-ttc",
help="custom path to %(default)s (which may be different from current default in future versions of OS X)",
default="/System/Library/Fonts/HelveticaNeueDeskInterface.ttc"
)
args = parser.parse_args()
def patchFont(ttcPath, fontNumber, inFolder, outFolder, help, fontScale, uninstall):
success = False
try:
ttxIn = fontTools.ttLib.TTFont(ttcPath, fontNumber=fontNumber)
except fontTools.ttLib.TTLibError:
ttxIn = None
if ttxIn:
fontName = ""
nameRecord = ttxIn["name"].getName(4, 1, 0, 0)
if nameRecord:
fontName = nameRecord.string
else:
nameRecord = ttxIn["name"].getName(4, 0, 3, 0)
if nameRecord:
fontName = unicode(
nameRecord.string, 'utf-16-be').encode('utf-8')
if help:
print('"%s.otf"' % fontName)
elif uninstall:
fontPathOut = os.path.join(outFolder, "%s.otf" % (fontName))
            if os.path.exists(fontPathOut):
try:
os.remove(fontPathOut)
print("Uninstalled: %s" % (fontPathOut))
success = True
except:
print(
"Cannot uninstall: %s (try running tool with 'sudo'!)" % (fontPathOut))
else:
fontPath = os.path.join(inFolder, "%s.otf" % (fontName))
if os.path.exists(fontPath):
ttxOut = fontTools.ttLib.TTFont(fontPath)
scaleFactor = (
float(ttxOut["head"].unitsPerEm)/ttxIn["head"].unitsPerEm)/fontScale
ttxOut[
"OS/2"].sTypoAscender = int(ttxIn["OS/2"].sTypoAscender * scaleFactor + 0.5)
ttxOut[
"OS/2"].sTypoDescender = int(ttxIn["OS/2"].sTypoDescender * scaleFactor + 0.5)
ttxOut[
"OS/2"].sTypoLineGap = int(ttxIn["OS/2"].sTypoLineGap * scaleFactor + 0.5)
ttxOut[
"OS/2"].usWinAscent = int(ttxIn["OS/2"].usWinAscent * scaleFactor + 0.5)
ttxOut[
"OS/2"].usWinDescent = int(ttxIn["OS/2"].usWinDescent * scaleFactor + 0.5)
ttxOut["hhea"].ascent = int(
ttxIn["hhea"].ascent * scaleFactor + 0.5)
ttxOut["hhea"].descent = int(
ttxIn["hhea"].descent * scaleFactor + 0.5)
ttxOut["hhea"].lineGap = int(
ttxIn["hhea"].lineGap * scaleFactor + 0.5)
ttxOut["name"] = ttxIn["name"]
ttxOut["head"].unitsPerEm = int(
ttxOut["head"].unitsPerEm / fontScale + 0.5)
if "CFF " in ttxOut:
cff = ttxOut["CFF "].cff
cffd = cff[cff.fontNames[0]].rawDict
cffd["FontMatrix"][0] = cffd["FontMatrix"][0] * fontScale
cffd["FontMatrix"][3] = cffd["FontMatrix"][3] * fontScale
psName = None
psNameRecord = ttxIn["name"].getName(6, 1, 0, 0)
if psNameRecord:
psName = psNameRecord.string
else:
psNameRecord = ttxIn["name"].getName(6, 0, 3, 0)
if psNameRecord:
psName = unicode(
psNameRecord.string, 'utf-16-be').encode('utf-8')
if psName:
cff.fontNames[0] = psName
fullName = None
fullNameRecord = ttxIn["name"].getName(4, 1, 0, 0)
if fullNameRecord:
fullName = fullNameRecord.string
else:
fullNameRecord = ttxIn["name"].getName(4, 0, 3, 0)
if fullNameRecord:
fullName = unicode(
fullNameRecord.string, 'utf-16-be').encode('utf-8')
if fullName:
cffd["FullName"] = fullName
famName = None
famNameRecord = ttxIn["name"].getName(1, 1, 0, 0)
if famNameRecord:
famName = famNameRecord.string
else:
famNameRecord = ttxIn["name"].getName(1, 0, 3, 0)
if famNameRecord:
famName = unicode(
famNameRecord.string, 'utf-16-be').encode('utf-8')
if famName:
cffd["FamilyName"] = famName
fontPathOut = os.path.join(outFolder, "%s.otf" % (fontName))
try:
ttxOut.save(fontPathOut)
print('Saved patched: "%s"' % (fontPathOut))
success = True
except:
print(
"Cannot save: %s (try running tool with 'sudo'!)" % (fontPathOut))
else:
print('Not found: "%s"' % (fontPath))
return (ttxIn, success)
if args.help_more:
print("This tool expects your font files with the following file names to be present in\n%s :" % (
args.input_folder))
fontNumber = 0
patchedFonts = 0
while True:
(opened, patched) = patchFont(ttcPath=args.system_ttc, fontNumber=fontNumber, inFolder=args.input_folder,
outFolder=args.output_folder, help=args.help_more, fontScale=float(args.font_size)/100, uninstall=args.uninstall)
if opened:
fontNumber += 1
else:
break
if patched:
patchedFonts += 1
if args.help_more:
print("The tool will match each of your fonts to one of the internal Mac OS X UI fonts\n(stored in %s)\nusing the filename.\nThen it will try to patch your fonts and install them in %s.\nAfter logging out and back in, you should see your new UI fonts." % (
args.system_ttc, args.output_folder))
else:
if args.uninstall:
print("Uninstalled %d fonts" % (patchedFonts))
else:
print("Finished patching %d fonts" % (patchedFonts))
if not args.no_reset_caches:
subprocess.call(["sudo", "atsutil", "databases", "-remove"])
print(
"Mac OS X font caches have been reset. Please log out of Mac OS X and log in again.")
``` |
{
"source": "jignatius/Dobby",
"score": 2
} |
#### File: process/settings/merge_settings.py
```python
import argparse
import pathlib
import json
from sys import exit
def main():
parser = argparse.ArgumentParser()
parser.add_argument('files', metavar='filepath', type=pathlib.Path, nargs='+')
parser.add_argument('--base', type=pathlib.Path, required=True)
parser.add_argument('--output', type=pathlib.Path, required=True)
args = parser.parse_args()
# Check we were given valid files
if not args.base.is_file():
print("ERROR: File not found:", str(args.base))
exit(1)
invalid_files = [x for x in args.files if not x.is_file()]
if invalid_files:
print("ERROR: File(s) not found:", [str(x) for x in invalid_files])
exit(1)
# Load the json to dicts
loaded_json = []
for file in args.files:
with file.open() as json_file:
loaded_json.append(json.load(json_file))
base_settings = {}
with args.base.open() as json_file:
base_settings = json.load(json_file)
# Merge everything into one
merged = merge_dicts(base_settings, loaded_json)
# Save the file
with args.output.open('w') as output_file:
json.dump(merged, output_file, ensure_ascii=False, indent=4)
def merge_dicts(base_file, list_dicts_to_merge):
"""
Taking an original settings file, merge settings file(s) on top of the original
without deleting any of the original settings file
    E.g. if the original settings file contains:
foo: {
mySettings: ['A', 'B']
}
and the append file contains:
foo: {
mySettings: ['C']
}
the final result will be:
foo: {
mySettings: ['A', 'B', 'C']
}
"""
result = base_file
for dictionary in list_dicts_to_merge:
do_merge(result, dictionary)
return result
def do_merge(result, to_merge):
"""
Recursively merge the dictionaries together
"""
for k, v in to_merge.items():
if isinstance(v, list):
if k not in result:
result[k] = []
result[k].extend([x for x in v if x not in result[k]])
elif isinstance(v, dict):
if k not in result:
result[k] = {}
do_merge(result[k], v)
else:
result[k] = v
if __name__ == "__main__":
main()
``` |
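A quick sketch of what `merge_dicts` does when driven directly with in-memory dicts instead of files; the keys below are made up for illustration, and note that the base dict is modified in place:

```python
# Illustrative only: merging one override dict into a base settings dict.
base = {"foo": {"mySettings": ["A", "B"], "enabled": True}}
extra = {"foo": {"mySettings": ["C"]}, "bar": {"level": 2}}

merged = merge_dicts(base, [extra])
print(merged)
# {'foo': {'mySettings': ['A', 'B', 'C'], 'enabled': True}, 'bar': {'level': 2}}
```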
{
"source": "Jignesh1996/bcmd-web",
"score": 3
} |
#### File: bayescmd/abc/distances.py
```python
import numpy as np
from .data_import import *
# All functions here can expect to handle the output from BCMD Model i.e.
# a dict.
def euclidean_dist(data1, data2):
"""
Gives the euclidean distance between two numpy arrays.
:param data1: Numpy array for data1
:type data1: np.ndarray
:param data2: Numpy array for data2
:type data2: np.ndarray
:return: Euclidean distance measure
:rtype: list of float
"""
try:
assert(data1.shape == data2.shape), 'Arrays not of equal size'
except AssertionError as e:
print(e)
print("\tData 1: ", data1.shape)
print("\tData 2: ", data2.shape)
return np.sum(np.sqrt(np.sum((data1 - data2) * (data1 - data2), axis=1)))
def manhattan_dist(data1, data2):
"""
Gives the Manhattan distance between two numpy arrays.
:param data1: Numpy array for data1
:type data1: np.ndarray
:param data2: Numpy array for data2
:type data2: np.ndarray
:return: Manhattan distance measure
:rtype: list of float
"""
assert(data1.shape == data2.shape), 'Arrays not of equal size'
return np.sum(np.abs(data1 - data2))
def mean_square_error_dist(data1, data2):
"""
Gives the mean squared error between two numpy arrays.
:param data1: Numpy array for data1
:type data1: np.ndarray
:param data2: Numpy array for data2
:type data2: np.ndarray
:return: MSE distance measure
:rtype: list of float
"""
assert(data1.shape == data2.shape), 'Arrays not of equal size'
n = data1.shape[1]
return np.sum(1 / n * np.sum((data1 - data2) * (data1 - data2), axis=1))
def mean_absolute_error_dist(data1, data2):
"""
Gives the normalised manhattan distance between two numpy arrays.
:param data1: Numpy array for data1
:type data1: np.ndarray
:param data2: Numpy array for data2
:type data2: np.ndarray
:return: MAE distance measure
:rtype: list of float
"""
assert(data1.shape == data2.shape), 'Arrays not of equal size'
n = data1.shape[1]
return 1 / n * np.sum(np.abs(data1 - data2))
DISTANCES = {
'euclidean': euclidean_dist,
'manhattan': manhattan_dist,
'MSE': mean_square_error_dist,
'MAE': mean_absolute_error_dist
}
def check_for_key(dictionary, target):
try:
data = dictionary[target]
except KeyError:
print('Actual data does not contain target value.')
return data
def zero_array(array):
"""
Method to zero an array of data with the initial values.
:param array: Array of data - rows are time points, columns are signals.
:return: Zero'd numpy array
:rtype: np.ndarray
"""
init = array[:, 0]
zerod = np.apply_along_axis(lambda x: x - init, 0, array)
return zerod
def get_distance(actual_data, sim_data, targets,
distance='euclidean', zero=False):
d0 = []
d_star = []
for idx, k in enumerate(targets):
d0.append(check_for_key(actual_data, k))
d_star.append(check_for_key(sim_data, k))
if zero:
try:
d_star = zero_array(np.array(d_star))
except (TypeError, IndexError):
print('Invalid Data')
return (float('NaN'))
return DISTANCES[distance](np.array(d0), np.array(d_star))
```
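As a rough illustration of how the helpers above combine, here is a small sketch with made-up signal dictionaries; the key names are arbitrary and only need to match the `targets` list:

```python
import numpy as np

# Hypothetical observed and simulated traces for two signals, four time points each.
actual = {'signal_a': np.array([1.0, 1.1, 1.2, 1.3]),
          'signal_b': np.array([0.5, 0.4, 0.6, 0.5])}
simulated = {'signal_a': np.array([1.0, 1.0, 1.2, 1.4]),
             'signal_b': np.array([0.5, 0.5, 0.6, 0.5])}

# Stacks each target into a 2-D array and sums the per-signal Euclidean distances.
d = get_distance(actual, simulated, targets=['signal_a', 'signal_b'],
                 distance='euclidean')
print(d)
```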
#### File: bayescmd/bcmdModel/bcmd_model.py
```python
import numpy
import numpy.random
import pprint
import tempfile
import shutil
import csv
import os
import copy
import sys
import datetime
import subprocess
from io import StringIO
import collections
from .input_creation import InputCreator
from ..util import findBaseDir
# default timeout, in seconds
TIMEOUT = 30
# default base directory - this should be a relative directory path
# leading to bcmd/
BASEDIR = findBaseDir(os.environ['BASEDIR'])
class ModelBCMD:
"""
BCMD model class. this can be used to create inputs, run simulations etc.
"""
def __init__(self,
model_name,
inputs=None, # Input variables
params=None, # Parameters
times=None, # Times to run simulation at
outputs=None,
burn_in=999,
create_input=True,
input_file=None,
suppress=False,
workdir=None, # default is to create a temp directory
# not quite sure when the best time for this is, probably in
# __del__?
deleteWorkdir=False,
timeout=TIMEOUT,
basedir=BASEDIR,
debug=False,
testing=False):
self.model_name = model_name
self.params = params # dict of non-default params
self.inputs = inputs # any time dependent inputs to the model
self.times = times
self.outputs = outputs
self.burn_in = burn_in
# Determine if input file is present already or if it needs creating
self.create_input = create_input
# Suppression of output files
self.suppress = suppress
if suppress:
self.DEVNULL = open(os.devnull, 'wb')
# we need working space; we may want to kill it later
self.deleteWorkdir = deleteWorkdir
if workdir:
self.workdir = workdir
if not os.path.exists(workdir):
os.makedirs(workdir)
else:
self.workdir = tempfile.mkdtemp(prefix=model_name)
self.deleteWorkdir = True
if debug:
print('TEMP DIR: ', self.workdir)
self.timeout = timeout
self.debug = debug
if input_file is not None:
self.input_file = input_file
elif create_input:
self.input_file = os.path.join(
self.workdir, self.model_name + '.input')
else:
self.input_file = None
if testing:
TEST_PRE = '_test'
else:
TEST_PRE = ''
self.basedir = basedir
self.program = os.path.join(
self.basedir, 'build', self.model_name + '.model')
self.output_coarse = os.path.join(
self.workdir, self.model_name + TEST_PRE + '.out')
self.output_detail = os.path.join(
self.workdir, self.model_name + TEST_PRE + '.detail')
self.output_dict = collections.defaultdict(list)
def _cleanupTemp(self):
if self.deleteWorkdir:
shutil.rmtree(self.workdir)
return None
def get_defaults(self):
print('GETTING MODEL DEFAULTS.\n')
return subprocess.run([self.program, '-s'], stdout=subprocess.PIPE)
def write_default_input(self):
"""
Function to write a default input to file.
"""
# Ensure that any existing input files aren't overwritten
try:
assert os.path.exists(self.input_file)
new_input = os.path.splitext(self.input_file)[
0] + '_{:%H%M%S-%d%m%y}.input'.format(datetime.datetime.now())
print('Input file %s already exists.\n Renaming as %s' %
(self.input_file, new_input))
input_creator = InputCreator(self.times, self.inputs,
filename=new_input)
input_creator.default_creation()
self.input_file = input_creator.input_file_write()
except AssertionError:
input_creator = InputCreator(self.times,
self.inputs,
filename=self.input_file)
input_creator.default_creation()
input_creator.input_file_write()
return True
def create_default_input(self):
"""
        Method to create input file and write to string buffer for access
direct from memory.
"""
input_creator = InputCreator(self.times, self.inputs)
self.input_file = input_creator.default_creation().getvalue()
return self.input_file
def write_initialised_input(self):
"""
Function to write a default input to file.
"""
# Ensure that any existing input files aren't overwritten
try:
assert os.path.exists(self.input_file)
new_input = os.path.splitext(self.input_file)[
0] + '_{:%H%M%S-%d%m%y}.input'.format(datetime.datetime.now())
print('Input file %s already exists.\n Renaming as %s' %
(self.input_file, new_input))
input_creator = InputCreator(self.times, self.inputs,
params=self.params,
outputs=self.outputs,
filename=new_input)
input_creator.initialised_creation(self.burn_in)
self.input_file = input_creator.input_file_write()
except AssertionError:
input_creator = InputCreator(self.times,
self.inputs,
params=self.params,
outputs=self.outputs,
filename=self.input_file)
input_creator.initialised_creation(self.burn_in)
input_creator.input_file_write()
return True
def create_initialised_input(self):
"""
Method to create input file and write to string buffer for access
direct from memory.
"""
input_creator = InputCreator(self.times, self.inputs,
params=self.params, outputs=self.outputs)
f_out = input_creator.initialised_creation(self.burn_in)
if self.debug:
print(f_out.getvalue(), file=sys.stderr)
f_out.seek(0)
self.input_file = f_out.getvalue()
pprint.pprint(self.input_file)
return self.input_file
def run_from_file(self):
try:
assert os.path.exists(self.input_file)
if self.debug:
print("\n\nOutput goes to:\n\tCOARSE:%s\n\tDETAIL:%s\n\n" %
(self.output_coarse, self.output_detail))
if self.suppress:
# invoke the model program as a subprocess
succ = subprocess.run([self.program,
'-i', self.input_file,
'-o', self.output_coarse,
'-d', self.output_detail],
stdout=self.DEVNULL,
stderr=self.DEVNULL,
timeout=self.timeout)
else:
stdoutname = os.path.join(
self.workdir, '%s.stdout' % (self.model_name))
stderrname = os.path.join(
self.workdir, '%s.stderr' % (self.model_name))
# if opening these files fails, we may be in trouble anyway
                # but don't peg out just because of this -- let the failure
# happen somewhere more important
try:
f_out = open(stdoutname, 'w')
except IOError:
f_out = None
try:
f_err = open(stderrname, 'w')
except IOError:
f_err = None
# invoke the model program as a subprocess
succ = subprocess.run([self.program,
'-i', self.input_file,
'-o', self.output_coarse,
'-d', self.output_detail],
stdout=f_out,
stderr=f_err,
timeout=self.timeout)
if f_out:
f_out.close()
if f_err:
f_err.close()
except AssertionError:
print("Input file doesn't exist. Can't run from file.")
return None
def run_from_buffer(self):
# Ensure that input file has seeked to 0
if self.debug:
print("Output goes to:\n\tCOARSE:%s\n\tDETAIL:%s" %
(self.output_coarse, self.output_detail))
if self.suppress:
# invoke the model program as a subprocess
result = subprocess.run([self.program,
'-I'],
input=self.input_file.encode(),
stdout=subprocess.PIPE,
stderr=self.DEVNULL,
timeout=self.timeout)
else:
stderrname = os.path.join(
self.workdir, '%s.stderr' % ("buffer_" + self.model_name))
# if opening these files fails, we may be in trouble anyway
            # but don't peg out just because of this -- let the failure
# happen somewhere more important
try:
f_err = open(stderrname, 'w')
except IOError:
f_err = None
# invoke the model program as a subprocess
result = subprocess.run([self.program,
'-I',
'-d', self.output_detail],
input=self.input_file.encode(),
stdout=subprocess.PIPE,
stderr=f_err,
timeout=self.timeout)
if f_err:
f_err.close()
self.output_coarse = StringIO(result.stdout.decode())
if self.debug:
pprint.pprint('OUTPUT: ' + self.output_coarse.getvalue(), stream=sys.stderr)
self.output_coarse.seek(0)
return result
def output_parse(self):
"""
Function to parse the output files into a dictionary.
"""
# Check if file is open
try:
file_out = open(self.output_coarse)
except TypeError:
file_out = self.output_coarse
if self.debug:
pprint.pprint(file_out.read(), stream=sys.stderr)
file_out.seek(0)
for d in csv.DictReader(file_out, delimiter='\t'):
for key, value in d.items():
if key == 'ERR':
pass
else:
try:
self.output_dict[key].append(float(value))
except (ValueError, TypeError) as e:
self.output_dict[key].append('NaN')
self._cleanupTemp()
return self.output_dict
```
#### File: bcmd-web/bparser/codegen.py
```python
import sys
import os
import decimal
import string
import logger
# template configuration: in theory this stuff could be
# modified at runtime, though in practice that seems unlikely
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
TEMPLATE_DIR = os.path.abspath(THIS_DIR + '/templates')
TEMPLATES = [ '01_header.c_template', '02_errors.c_template', '03_prototypes.c_template', '05_functions.c_template' ]
# generate the C code from a parsed model
# much of the code is unchanging boilerplate, and much of that
# is simply copied direct from several template files
# where the code is dependent on the model it is constructed
# in a bunch of subsidiary functions below -- at present
# these are a rather ugly mix of literal C strings and
# variable substitutions -- maybe look into making this
# less horrible in future?
def generateSource(model, config, template_dir=TEMPLATE_DIR):
f = open(template_dir + '/' + TEMPLATES[0])
src = f.read()
f.close()
f = open(template_dir + '/' + TEMPLATES[1])
src = src + f.read()
f.close()
src = src + generateModelVars(model, config)
f = open(template_dir + '/' + TEMPLATES[2])
src = src + f.read()
f.close()
src = src + generateEmbeds(model)
src = src + generateModelFuncs(model, config)
f = open(template_dir + '/' + TEMPLATES[3])
src = src + f.read()
f.close()
return src
# generate the model variables segment
def generateModelVars(model, config):
diffcount = len(model['diffs'])
algcount = len(model['algs'])
symcount = len(model['symlist'])
varcount = diffcount + algcount
src = '/* Model-specific constants and statics */\n'
src = src + 'const char* MODEL_NAME = "' + config['name'] + '";\n'
if model['version']:
src = src + 'const char* MODEL_VERSION = "' + model['version'] + '";\n'
else:
src = src + 'const char* MODEL_VERSION = "(version not specified)";\n'
if model['diagonal']:
src = src + 'const int DIAGONAL = 1;\n'
src = src + 'const int REQUIRE_MASS = 0;\n'
else:
src = src + 'const int DIAGONAL = 0;\n'
src = src + 'const int REQUIRE_MASS = 1;\n'
src = src + 'const unsigned int DIFF_EQ_COUNT = ' + str(diffcount) + ';\n'
src = src + 'const unsigned int ALGEBRAIC_COUNT = ' + str(algcount) + ';\n'
src = src + 'const unsigned int VAR_COUNT = ' + str(diffcount + algcount) + ';\n'
src = src + 'const unsigned int SYMBOL_COUNT = ' + str(symcount) + ';\n\n'
src = src + 'static char* SYMBOLS[' + str(symcount) + '] = \n{\n'
src = src + formatArray(model['symlist'])
src = src + '};\n\n'
src = src + 'static char* ROOTS[' + str(varcount) + '] = \n{\n'
src = src + formatArray(model['diffs'] + model['algs'])
src = src + '};\n\n'
if model['intermeds']:
src = src + 'static double INTERMEDIATES[' + str(len(model['intermeds'])) + '] = {0};\n\n'
indices = [0]
for name in model['outputs']:
indices.append(model['symbols'][name]['index'])
src = src + 'static int DEFAULT_FIELDS[' + str(len(indices)) + '] = \n{\n'
src = src + formatArray(indices, width=10, quote='')
src = src + '};\n'
src = src + 'static OutputSpec DEFAULT_OUTSPEC = { ' + str(len(indices)) + ', DEFAULT_FIELDS };\n\n'
return src
# generate segment for embedded C chunks
# (by just pasting them all together -- this stuff is not checked)
def generateEmbeds(model):
return '/* Embedded C code from the model, if any */\n\n' + '\n'.join(model['embeds']) + '\n\n'
# generate the model functions segment
def generateModelFuncs(model, config):
if config['unused']:
targets = model['assigned']
else:
targets = list(model['assigned'] - model['unused'])
src = '/* Model-specific functions */\n'
src = src + generateModelInit(model, config, targets)
src = src + generateParamUpdate(model, config, targets)
src = src + generateSaveY(model, config)
src = src + generateSaveIntermediates(model, config)
src = src + generateCarryForward(model, config)
src = src + generateRHS(model, config, targets)
src = src + generateConstraints(model, config)
return src
# generate the model initialisation code
def generateModelInit(model, config, targets):
src = '''
/* Initialise parameters with any values known at compile time.
(NB: these may be overwritten by runtime values) */
void model_init()
{
'''
if not model['diagonal']:
src = src + ' double* mass = radau5_getMassMatrix();\n\n'
if config['debug']: src = src + ' fprintf(stderr, "# Initialising parameters\\n");\n\n'
independent = model['assignments']['independent']
for ii in range(len(independent['names'])):
name = independent['names'][ii]
if name in targets:
expr = independent['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model) + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n'
dependent = model['assignments']['dependent']
for ii in range(len(dependent['names'])):
name = dependent['names'][ii]
if name in targets:
expr = dependent['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model) + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n'
src = src + '\n constrain_params();\n'
src = src + '\n carry_forward();\n'
if not model['diagonal']:
idy = 0
for item in model['diffs']:
for aux in model['auxiliaries'][item]:
auxname = aux[1]
if auxname not in model['diffs']:
logger.error('Error: auxiliary term not in diffs: ' + auxname)
else:
idx = model['diffs'].index(aux[1])
src = src + '\n /* auxiliary diff eqn term: ' + item + "' : "
src = src + str(aux[0]) + " " + auxname + "' */\n"
# idy indexes the equation, idx the crossref
# Fortran uses column-major order for matrices,
# which I *think* makes this the right way to index
src = src + ' mass[VAR_COUNT * ' + str(idx) + ' + ' + str(idy) + '] = ' + str(aux[0]) + ';\n'
idy = idy + 1
src = src + '}\n'
return src
# generate param_update function
def generateParamUpdate(model, config, targets):
src = '''
/* Propagate parameter changes to any dependent parameters */
void param_update()
{
'''
step = model['assignments']['step']
if len(step) > 0:
if config['debug']: src = src + ' fprintf(stderr, "# Updating dependent parameters:\\n");\n\n'
for ii in range(len(step['names'])):
name = step['names'][ii]
if name not in targets: continue
expr = step['exprs'][ii]
idx = model['symbols'][name]['index']
src = src + ' RPAR[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model, 'step') + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']: src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", RPAR[' + str(idx) + ']);\n\n'
else:
src = src + ' /* no parameters to update for this model */\n'
src = src + '}\n'
return src
def generateSaveY(model, config):
src = '''
/* Copy Y values into corresponding spaces in the RPAR array */
void save_y(double* y)
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Saving Y estimates\\n");\n'
idy = 0
for item in model['diffs'] + model['algs']:
src = src + ' /* ' + item + ' */\n'
src = src + ' RPAR[' + str(model['symbols'][item]['index']) + '] = y[' + str(idy) + '];\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + item + ' = %.17g\\n", y[' + str(idy) + ']);\n'
idy = idy + 1
src = src + '}\n'
return src
def generateSaveIntermediates(model, config):
src = '''
/* Copy intermediate variables into corresponding spaces in the RPAR array */
void save_intermediates()
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Saving intermediates\\n");\n'
idy = 0
for item in model['intermeds']:
src = src + ' /* ' + item + ' */\n'
src = src + ' RPAR[' + str(model['symbols'][item]['index']) + '] = INTERMEDIATES[' + str(idy) + '];\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + item + ' = %.17g\\n", INTERMEDIATES[' + str(idy) + ']);\n'
idy = idy + 1
src = src + '}\n'
return src
def generateCarryForward(model, config):
src = '''
/* Update Y array with corresponding values from the RPAR array */
void carry_forward()
{
'''
if config['debug']: src = src + ' fprintf(stderr, "# Setting Y variables\\n");\n'
idy = 0
for item in model['diffs'] + model['algs']:
src = src + ' /* ' + item + ' */\n'
src = src + ' Y[' + str(idy) + '] = RPAR[' + str(model['symbols'][item]['index']) + '];\n'
if config['debug']:
src = src + ' fprintf(stderr, "' + item + ' = %.17g\\n", Y[' + str(idy) + ']);\n'
idy = idy + 1
src = src + '}\n'
return src
# generate right hand side function
def generateRHS(model, config, targets):
src = '''
/* right hand side of main equation system */
void rhs(int* n, double* x, double* y, double* f, double* rpar, int* ipar)
{
/* independent variable is always stored in RPAR[0] */
RPAR[0] = *x;
constrain_y(y);
constrain_params();
'''
if config['debug']: src = src + ' fprintf(stderr, "*** RHS step at %s = %.17g\\n", SYMBOLS[0], *x);\n'
runtime = model['assignments']['runtime']
if len(runtime['names']) > 0:
lhs = model['diffs'] + model['algs']
src = src + '\n /* calculate dependent parameters and intermediate variables */\n'
if config['debug']: src = src + ' fprintf(stderr, "# Calculating intermediates:\\n");\n\n'
for ii in range(len(runtime['names'])):
name = runtime['names'][ii]
if name in lhs: continue
if name not in targets: continue
expr = runtime['exprs'][ii]
idx = model['intermeds'].index(name)
src = src + ' INTERMEDIATES[' + str(idx) + '] = ' + str_i_expr(expr['i_expr'], model, 'solve') + ';'
src = src + '\t\t/* ' + name + '=' + expr['expr'] + ' */\n'
if config['debug']: src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", INTERMEDIATES[' + str(idx) + ']);\n\n'
src = src + '''
constrain_intermediates();
if ( SAVE_INTERMEDIATES )
save_intermediates();
constrain_params();
'''
else:
src = src + '\n /* no dependent parameters or intermediates required for this model */\n'
src = src + '\n if ( f )'
src = src + '\n {'
src = src + '\n /* calculate output variables */\n'
if config['debug']: src = src + ' fprintf(stderr, "# Calculating outputs:\\n");\n\n'
idy = 0
for name in model['diffs']:
# for the moment we'll just assume that the right expression is always
# the first in the list, and not even bother to look further
expr = model['symbols'][name]['diffs'][0]
src = src + ' /* ' + name + "' = " + expr['expr'] + ' */\n'
src = src + ' f[' + str(idy) + '] = ' + str_i_expr(expr['i_expr'], model, 'solve') + ';\n'
if config['debug']: src = src + ' fprintf(stderr, "' + name + '\' = %.17g\\n", f[' + str(idy) + ']);\n\n'
idy = idy + 1
for name in model['algs']:
expr = model['symbols'][name]['algs'][0]
src = src + ' /* ' + name + " = " + expr['expr'] + ' */\n'
src = src + ' f[' + str(idy) + '] = ' + str_i_expr(expr['i_expr'], model, 'solve') + ';\n'
if config['debug']: src = src + ' fprintf(stderr, "' + name + ' = %.17g\\n", f[' + str(idy) + ']);\n\n'
idy = idy + 1
src = src + ' }\n'
src = src + '}\n'
return src
def generateConstraints(model, config):
src = '''
/* Enforce constraints on parameters/intermediates (if any). */
void constrain_params ()
{
'''
targets = model['symbols'].keys()
if not config['unused']:
targets = list(set(targets) - model['unused'])
for name in targets:
sym = model['symbols'][name]
for constraint in sym['constraints']:
src = src + ' if ( RPAR[' + str(sym['index']) + '] ' + constraint['test'] + \
' ' + str_i_expr(constraint['i_expr'], model) + ' )\n {\n'
if constraint['kind'] == 'bound':
src = src + ' /* hard bound on ' + name + ' */\n'
src = src + ' RPAR[' + str(sym['index']) + '] = ' + \
str_i_expr(constraint['i_expr'], model) + ';\n'
else:
src = src + ' /* TODO: handle soft bound on ' + name + ' */\n'
src = src + ' }\n'
src = src + '''}
void constrain_intermediates()
{
'''
targets = model['intermeds']
if not config['unused']:
targets = list(set(targets) - model['unused'])
for name in targets:
idx = model['intermeds'].index(name)
sym = model['symbols'][name]
for constraint in sym['constraints']:
src = src + ' if ( INTERMEDIATES[' + str(idx) + '] ' + constraint['test'] + \
' ' + str_i_expr(constraint['i_expr'], model, 'solve') + ' )\n {\n'
if constraint['kind'] == 'bound':
src = src + ' /* hard bound on ' + name + ' */\n'
src = src + ' INTERMEDIATES[' + str(idx) + '] = ' + \
str_i_expr(constraint['i_expr'], model, 'solve') + ';\n'
else:
src = src + ' /* TODO: handle soft bound on ' + name + ' */\n'
src = src + ' }\n'
src = src + '''}
void constrain_y( double* y )
{
'''
targets = model['diffs'] + model['algs']
for name in targets:
idx = targets.index(name)
sym = model['symbols'][name]
for constraint in sym['constraints']:
src = src + ' if ( y[' + str(idx) + '] ' + constraint['test'] + \
' ' + str_i_expr(constraint['i_expr'], model, 'solve') + ' )\n {\n'
if constraint['kind'] == 'bound':
src = src + ' /* hard bound on ' + name + ' */\n'
src = src + ' y[' + str(idx) + '] = ' + \
str_i_expr(constraint['i_expr'], model, 'solve') + ';\n'
else:
src = src + ' /* TODO: handle soft bound on ' + name + ' */\n'
src = src + ' }\n'
src = src + '''}
'''
return src
# convert an i_expr tuple into C code with the appropriate
# data context
# recognised contexts are as follows:
# 'init' - model initialisation
# 'step' - after parameters have been assigned externally
# 'solve' - inside the solver RHS call
def str_i_expr(i_expr, model, context='init'):
expr = ''
for item in i_expr:
if item[0] == 'literal':
expr = expr + item[1]
elif item[0] == 'symbol':
expr = expr + str_i_symbol(item[1], model, context)
else:
# add a dummy symbol to produce a C compiler error
logger.error('unknown item |%s| in i_expr' % str(item))
expr = expr + 'ERROR_IN_IEXPR'
return expr
# map a symbol appropriately for the given context
# (see above for supported contexts)
def str_i_symbol(name, model, context):
if name in model['params']:
# these are always used from RPAR
return 'RPAR[' + str(model['symbols'][name]['index']) + ']'
elif name in model['roots']:
if context == 'solve':
return 'y[' + str((model['diffs'] + model['algs']).index(name)) + ']'
elif context == 'step':
return 'Y[' + str((model['diffs'] + model['algs']).index(name)) + ']'
else:
return 'RPAR[' + str(model['symbols'][name]['index']) + ']'
elif name in model['intermeds']:
# temp array used during a solve, but not outside
if context == 'solve':
return 'INTERMEDIATES[' + str(model['intermeds'].index(name)) + ']'
else:
return 'RPAR[' + str(model['symbols'][name]['index']) + ']'
else:
logger.error('unknown symbol |%s| in i_expr' % name)
return 'ERROR_IN_IEXPR'
## ------ utility functions ---------------
# generate a string containing the items of an array, formatted
# for embedding in code -- default settings are for string items,
# for numbers set quote=''
def formatArray(items, width=5, quote='"', inset=' ', sep=', ', end='\n'):
src = ''
idx = 0
while idx + width < len(items):
src = src + inset
for jj in range(width):
src = src + quote + str(items[idx + jj]) + quote + sep
idx = idx + width
src = src + end
src = src + inset
while idx + 1 < len(items):
src = src + quote + str(items[idx]) + quote + sep
idx = idx + 1
src = src + quote + str(items[idx]) + quote + end
return src
```
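To make the context mapping in `str_i_expr` and `str_i_symbol` concrete, here is a small sketch using a hand-built stub of a parsed model; the stub only carries the keys those two functions look at and is not a real output of the parser:

```python
# Hypothetical minimal model stub, just enough for str_i_expr / str_i_symbol.
model = {
    'params': {'k'},
    'roots': ['x'],
    'diffs': ['x'],
    'algs': [],
    'intermeds': [],
    'symbols': {'k': {'index': 3}, 'x': {'index': 1}},
}

i_expr = (('literal', '2 * '), ('symbol', 'k'), ('literal', ' + '), ('symbol', 'x'))
print(str_i_expr(i_expr, model, 'init'))   # 2 * RPAR[3] + RPAR[1]
print(str_i_expr(i_expr, model, 'solve'))  # 2 * RPAR[3] + y[0]
```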
#### File: bcmd-web/bparser/doc_modeldef.py
```python
import sys
import os
import os.path
import re
import datetime, time
import decimal
def writeDoc(model, config):
with open(os.path.join(config['outdir'], config['modeldef']), 'w') as f:
printHeader(f, model, config)
printModelDocs(f, model, config)
printDirectives(f, model, config)
printEmbeds(f, model, config)
printReactions(f, model, config)
printDiffs(f, model, config)
printAlgs(f, model, config)
printRoots(f, model, config)
printIntermeds(f, model, config)
printParameters(f, model, config)
printFooter(f, model, config)
def printHeader(file, model, config):
print >> file, '# consolidated modeldef for model %s' % config['name']
print >> file, '# generated by BCMD module doc_modeldef.py'
print >> file, '# %s' % datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
print >> file, ''
def printFooter(file, model, config):
pass
def printDirectives(file, model, config):
some = False
# TODO: break each of the following into multiple directives if there are many
if model['inputs']:
print >> file, '@input %s' % ' '.join(model['inputs'])
some = True
if model['outputs'] and (set(model['outputs']) != set(model['roots'])):
print >> file, '@output %s' % ' '.join(model['outputs'])
some = True
if model['extern']:
print >> file, '@extern %s' % ' '.join(model['extern'])
some = True
if some:
print >> file, ''
def printEmbeds(file, model, config):
if model['embeds']:
print >> file, '# embedded C code'
print >> file, '[**'
for embed in model['embeds']:
print >> file, embed
print >> file, '**]'
print >> file, ''
def printModelDocs(file, model, config):
some = False
for line in model['modeldocs']:
if not (line.startswith('@') or line.startswith('$') or line.startswith('~')):
print >> file, '## %s' % line
some = True
if some:
print >> file, '## @\n'
def printReactions(file, model, config):
if not model['reactions']:
return
print >> file, '# chemical reactions'
for reac in sorted(model['reactions'].keys(), key=lambda s: s.lower()):
# merge forward/reverse pairs into single two-way reactions
if reac.endswith('_reverse') and (reac[:-7] + 'forward') in model['reactions']:
continue
op = '->'
# collect terms and forward rate
lhs = ''
for term in model['reactions'][reac]['lhs']:
stoich = translate(term['mathterm_unmod'], model, config)[0]
try:
dec = decimal.Decimal(stoich)
if dec == 1:
stoich = ''
except:
pass
if stoich:
stoich = stoich + ' '
chem = term['chem']
if lhs:
lhs = '%s + %s[%s]' % (lhs, stoich, chem)
else:
lhs = '%s[%s]' % (stoich, chem)
rhs = ''
for term in model['reactions'][reac]['rhs']:
stoich = translate(term['mathterm_unmod'], model, config)[0]
try:
dec = decimal.Decimal(stoich)
if dec == 1:
stoich = ''
except:
pass
if stoich:
stoich = stoich + ' '
chem = term['chem']
if rhs:
rhs = '%s + %s[%s]' % (rhs, stoich, chem)
else:
rhs = '%s[%s]' % (stoich, chem)
fspec = model['reactions'][reac]['ratespec']
fargs = ','.join([translate(x[1], model, config)[0] for x in fspec[2][1:]])
if fspec[1] == 'MA':
forward = '{MA:%s}' % fargs
elif fspec[1] == 'MM':
forward = '{MM:%s}' % fargs
else:
forward = '{%s}' % fargs
# if there's a reverse reaction, get that rate too
if reac.endswith('_forward') and (reac[:-7] + 'reverse') in model['reactions']:
rspec = model['reactions'][reac[:-7] + 'reverse']['ratespec']
rargs = ','.join([translate(x[1], model, config)[0] for x in rspec[2][1:]])
if rspec[1] == 'MA':
reverse = '{MA:%s}' % rargs
elif rspec[1] == 'MM':
reverse = '{MM:%s}' % rargs
else:
reverse = '{%s}' % rargs
op = '<->'
else:
reverse = ''
rspec = ()
print >> file, ('%s %s %s %s %s' % (lhs, op, rhs, forward, reverse)).strip()
print >> file, ''
## TEMP HACK -- ultimately I expect to just get rid of this entirely...
def latexName(name, model, insert=True):
return name
def printDiffs(file, model, config):
if not model['diffs']:
return
print >> file, '# differential equations'
if config.get('model-comment-chem-diffs', True) and any([x in model['chemicals'] for x in model['diffs']]):
print >> file, '# note: commented out equations are defined by the chemical reactions above'
print >> file, '# translated forms are shown here for information only'
tt = latexName(model['symlist'][0], model)
first = True
for name in sorted(model['diffs'], key=lambda s: s.lower()):
lhs = "%s'" % latexName(name, model)
for aux in model['auxiliaries'][name]:
mass = aux[0]
if mass < 0:
mass = -mass
op = '-'
else:
op = '+'
if mass == 1:
mstr = ''
else:
mstr = str(mass)
lhs = lhs + op + mstr + "%s'" % latexName(aux[1], model)
rhs = substitute(model['symbols'][name]['diffs'][0], model, config).strip()
if name in model['chemicals']:
if config.get('model-comment-chem-diffs', False):
print >> file, '# %s = %s' % (lhs, rhs)
else:
print >> file, '%s = %s' % (lhs, rhs)
print >> file, ''
def printAlgs(file, model, config):
if not model['algs']:
return
print >> file, '# algebraic relations'
for name in sorted(model['algs'], key=lambda s: s.lower()):
rhs = substitute(model['symbols'][name]['algs'][0], model, config)
print >> file, '%s : 0 = %s' % (name, rhs)
print >> file, ''
def printRoots(file, model, config):
printVars(sorted(model['roots'], key=lambda s: s.lower()), 'state variables', file, model, config, omit_expr=True)
def printIntermeds(file, model, config):
printVars(sorted(model['intermeds'], key=lambda s: s.lower()), 'intermediate variables', file, model, config)
def printParameters(file, model, config):
printVars(sorted(model['params'], key=lambda s: s.lower()), 'parameters', file, model, config)
def printVars(vars, title, file, model, config, omit_expr=False):
if vars:
print >> file, '# %s' % title
print >> file, ''
for name in vars:
printVar(name, file, model, config, omit_expr)
print >> file, ''
def printVar(name, file, model, config, omit_expr=False):
sym = model['symbols'][name]
for line in sym['docs']:
if line.startswith('+') or line.startswith('@') or line.startswith('$') or line.startswith('~'):
pass
else:
print >> file, '## %s' % line
units = sym.get('units', '')
if units:
print >> file, '## ~ %s' % units
latex = sym.get('latex', '')
if latex:
print >> file, '## $%s$' % latex
tags = ' '.join(sym.get('tags', []))
if tags:
print >> file, '## + %s' % tags
noninits = []
if not omit_expr:
noninits = [x for x in sym['assigns'] if not x['init']]
if noninits:
noninit = substitute(noninits[0], model, config)
print >> file, '%s = %s' % (name, noninit)
inits = [x for x in sym['assigns'] if x['init']]
if inits:
init = substitute(inits[0], model, config)
elif noninits:
# skip the default initialiser if there is already an assignment
init = None
else:
init = '0'
if init is not None:
print >> file, '%s := %s' % (name, init)
for constraint in sym['constraints']:
# at present the compiler doesn't store a mathexpr for constraints, so use the old expr stuff
# (this will also need to be extended if and when soft constraints ever get implemented)
invtest = { '>':'<=', '>=':'<', '<':'>=', '<=':'>'}.get(constraint['test'], 'ERROR')
if not (name in model['chemicals'] and (invtest == '>=') and constraint['expr']=='0'):
print >> file, '%s %s %s' % ( name, invtest, constraint['expr'] )
print >> file, ''
def substitute(init, model, config):
# init is an assigns entry, ie a dict with expr, depends, etc
# we don't yet construct mathterms for diff eqs from chem eqs, so have to check
if 'mathterm' in init:
expr = translate(init['mathterm'], model, config)[0]
else:
expr = init['expr']
# need to have some way of managing collisions here -- this will eventually get more sensible
# but for now, we substitute long ids before short
for dep in sorted(init['depends'], key=lambda x:-len(x)):
expr = expr.replace(dep, latexName(dep, model))
return expr
def translate(math, model, config):
if isinstance(math, decimal.Decimal):
# yet another formatting special case, purely because these annoy me!
result = str(math)
if result.endswith('.0'):
result = result[:-2]
return (result, '')
if isinstance(math, str):
return (latexName(math, model), '')
if math[0] == 'function':
if len(math) < 3:
args = ''
else:
args = ', '.join([ translate(x[1], model, config)[0] for x in math[2][1:] ])
return ('%s(%s)' % (math[1], args), '')
if math[0] == 'conditional':
return ('%s ? %s : %s' % (translate_binop(math[1], model, config)[0],
translate(math[2][1], model, config)[0],
translate(math[3][1], model, config)[0]), '')
if math[0] == 'arithmetic':
return translate_binop(math, model, config)
return ('[ERROR]', '')
def translate_binop(math, model, config):
lhs, lop = translate(math[2][1], model, config)
rhs, rop = translate(math[3][1], model, config)
# check for pure numbers, because we want to handle some special cases
try:
L = decimal.Decimal(lhs)
except:
L = None
try:
R = decimal.Decimal(rhs)
except:
R = None
if math[1] == '*':
if lop == '+' or lop == '-':
lhs = '(%s)' % lhs
if rop == '+' or rop == '-':
rhs = '(%s)' % rhs
# numeric special cases
if L is not None:
# nested special case for our stupid handling of unary minus in the parser...
if L == -1:
op = ''
lhs = ''
rhs = '-%s' % rhs
# and to eliminate superfluous multiplications by 1
elif L == 1:
op = ''
lhs = ''
else:
op = '*'
elif R is not None:
if R == 1:
op = ''
rhs = ''
else:
op = '*'
else:
op = '*'
return (('%s %s %s' % (lhs, op, rhs)).strip(), '*')
if math[1] == '/':
if lop == '+' or lop == '-':
lhs = '(%s)' % lhs
if rop == '+' or rop == '-' or rop == '*' or rop == '/':
rhs = '(%s)' % rhs
return ('%s / %s' % (lhs, rhs), '/')
if math[1] == '^':
if lop != '':
lhs = '(%s)' % lhs
return ('%s^(%s)' % (lhs, rhs), '^')
if math[1] == '+':
# another dodgy special case: convert + - into -
if rhs.strip().startswith('-'):
return( '%s - %s' % (lhs, rhs.strip()[1:]), '-' )
# and yet another, perhaps dodgiest of all: convert -a + b into b - a
if lhs.strip().startswith('-'):
return( '%s - %s' % (rhs, lhs.strip()[1:]), '-' )
return ('%s + %s' % (lhs, rhs), '+')
if math[1] == '-':
if rop == '-':
rhs = '(%s)' % rhs
return ('%s - %s' % (lhs, rhs), '-')
# all remaining binops are logical
# these only occur in conditions and have the weakest precedence, so we never bracket
return ('%s %s %s' %(lhs, math[1], rhs), '')
```
#### File: bcmd-web/bparser/logger.py
```python
import sys
import pprint
# extremely minimal logging module for writing out messages at
# quasi-arbitrary verbosity levels -- some standard levels are
# defined, but module can also be used with arbitrary levels
DISASTER = -1
ERROR = 1
WARNING = 3
MESSAGE = 5
DETAIL = 7
# default verbosity hides messages and details
verbosity = WARNING
deathThrows = True
# go to stderr by default
dest = sys.stderr
# just write it
def write(msg, level=1):
if level <= verbosity:
print(msg, file=dest)
# prettify it first
def pretty(msg, level=1):
if level <= verbosity:
if isinstance(msg, str):
print(msg, file=dest)
else:
print(pprint.pformat(msg), file=dest)
# some message level wrappers
def error(msg, prettify=False):
if prettify:
pretty(msg, level=ERROR)
else:
write(msg, level=ERROR)
def warn(msg, prettify=False):
if prettify:
pretty(msg, level=WARNING)
else:
write(msg, level=WARNING)
def message(msg, prettify=False):
if prettify:
pretty(msg, level=MESSAGE)
else:
write(msg, level=MESSAGE)
def detail(msg, prettify=True):
if prettify:
pretty(msg, level=DETAIL)
else:
write(msg, level=DETAIL)
def die(msg, prettify=False):
if prettify:
pretty(msg, level=DISASTER)
else:
write(msg, level=DISASTER)
if deathThrows:
raise Exception(msg)
``` |
{
"source": "Jignesh1996/s2w",
"score": 3
} |
#### File: s2w/run/model.py
```python
import pickle
import re
import numpy as np
from keras.layers import Input, LSTM, Embedding, Dense,Dropout,TimeDistributed
from keras.models import Model
from sklearn.model_selection import train_test_split
from model_file import build
class DumbModel:
    def __init__(self, num_of_encoder_tokens, num_of_decoder_tokens, vocab_size=10000):
self.vocab_size = vocab_size
self.clf=None
self.num_of_encoder_tokens = num_of_encoder_tokens
self.num_of_decoder_tokens = num_of_decoder_tokens
    def generate_batch(self, X, y, batch_size=128):
while True:
for j in range(0, len(X), batch_size):
#encoder input
encoder_input_data = np.zeros((batch_size, max_source_length),dtype='float32')
#decoder input
decoder_input_data = np.zeros((batch_size, max_target_size),dtype='float32')
#target
decoder_target_data = np.zeros((batch_size, max_target_size, num_of_decoder_tokens),dtype='float32')
for i, (input_text, target_text) in enumerate(zip(X[j:j+batch_size], y[j:j+batch_size])):
for t, word in enumerate(input_text.split()):
encoder_input_data[i, t] = eng_char_to_index_dict[word] # encoder input seq
for t, word in enumerate(target_text.split()):
if t<len(target_text.split())-1:
decoder_input_data[i, t] = target_char_to_index_dict[word] # decoder input seq
if t>0:
# decoder target sequence (one hot encoded)
# does not include the START_ token
# Offset by one timestep since it is one time stamp ahead
decoder_target_data[i, t - 1, target_char_to_index_dict[word]] = 1
yield([encoder_input_data, decoder_input_data], decoder_target_data)
def train(self,X_train,y_train):
        model = build(self.num_of_encoder_tokens, self.num_of_decoder_tokens)
X_train, X_test, y_train, y_test = train_test_split(X_train,y_train, test_size = 0.2)
train_samples = len(X_train)
val_samples = len(X_test)
batch_size = 50
epochs = 50
        model.fit_generator(generator=self.generate_batch(X_train, y_train, batch_size=batch_size),
steps_per_epoch = train_samples//batch_size,
epochs=epochs,
callbacks=[es],
                            validation_data=self.generate_batch(X_test, y_test, batch_size=batch_size),
validation_steps = val_samples//batch_size,)
pass
def inference(self):
# Inference model
# Encoder
encoder_inputs = Input(shape=(None,))
enc_emb = Embedding(num_of_encoder_tokens, latent_dim, mask_zero = True)(encoder_inputs)
encoder_lstm = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(enc_emb)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,))
dec_emb_layer = Embedding(num_of_decoder_tokens, latent_dim, mask_zero = True)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb,
initial_state=encoder_states)
decoder_dense = TimeDistributed(Dense(num_of_decoder_tokens, activation='softmax'))
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
#storing encoder input and internal states so as to give to decoder part
encoder_model = Model(encoder_inputs, encoder_states)
#specifying hidden and cell state for decoder part as vector process it will get output predicted and again we add to decoder states
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
dec_emb2= dec_emb_layer(decoder_inputs) # Get the embeddings of the decoder sequence
# To predict the next word in the sequence, set the initial states to the states from the previous time step
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
decoder_states2 = [state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2) # A dense softmax layer to generate prob dist. over the target vocabulary
# Final decoder model
decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs2] + decoder_states2)
return encoder_model,decoder_model
def decode_sequence(self,input_seq):
# Encode the input as state vectors
        encoder_model, decoder_model = self.inference()
states_value = encoder_model.predict(input_seq)
# Generate empty target sequence of length 1.
target_seq = np.zeros((1,1))
target_seq[0, 0] = mar_char_to_index_dict['START_']
stop_condition = False
decoded_sentence = ''
while not stop_condition:
output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_char = mar_index_to_char_dict[sampled_token_index]
if (sampled_char == '_END'):
break;
decoded_sentence += ' '+sampled_char
target_seq = np.zeros((1,1))
target_seq[0, 0] = sampled_token_index
# Update states
states_value = [h, c]
return decoded_sentence
    def pre_process(self, sentence):
sentence = sentence.lower()
        sentence = re.sub("'", "", sentence).strip()
# sentence = re.sub(" +", " ", sentence)
# remove_digits = str.maketrans('','',digits)
# sentence=sentence.translate(remove_digits)
        sentence = ' '.join(e.lower() for e in sentence.split() if e.lower() not in exclude)
encoder_input_data = np.zeros((1, 35),dtype='float32')
for t, word in enumerate(sentence.split()):
encoder_input_data[0, t] = eng_char_to_index_dict[word]
return encoder_input_data
def predict(self,x):
        sent = self.pre_process(x)
        predicted_output = self.decode_sequence(sent)
return predicted_output
def serialize(self,fname):
with open(fname,'wb') as f:
pickle.dump(self.clf,f)
@staticmethod
def deserialize(fname):
model = DumbModel()
with open(fname,'rb') as f:
model.clf=pickle.load(f)
return model
``` |
{
"source": "jigneshoo7/AlgoBook",
"score": 4
} |
#### File: python/Binary Tree/bst_recursion.py
```python
class Node:
def __init__(self,key):
self.left = None
self.right = None
self.value = key
def insert(root,key):
if root is None:
return Node(key)
else:
if root.value == key:
print("Value already exists")
return root
elif root.value < key:
root.right = insert(root.right, key)
else:
root.left = insert(root.left, key)
return root
def inorder(root):
if root is not None:
inorder(root.left)
print(root.value)
inorder(root.right)
def search(root, key):
if root is None:
print("Element not found")
return None
elif root.value == key:
print("Element found")
return root
    if root.value > key:
        return search(root.left, key)
    else:
        return search(root.right, key)
def delete(root, key):
if root is None:
return root
if key < root.value:
root.left = delete(root.left, key)
elif(key > root.value):
root.right = delete(root.right, key)
else:
if root.left is None :
temp = root.right
root = None
return temp
elif root.right is None :
temp = root.left
root = None
return temp
temp = getMin(root.right)
root.value = temp.value
root.right = delete(root.right , temp.value)
return root
def getMin(node):
current = node
while(current.left is not None):
current = current.left
return current
if __name__=="__main__":
r = insert(None,int(input("Enter root node: ")))
while True:
choice = int(input("1 - Insert Node, 2 - Print in inorder, 3 - Delete, 4 - Search: , 5 - Exit: "))
if choice == 1:
r = insert(r,int(input("Enter node value: ")))
elif choice == 2:
inorder(r)
elif choice == 3:
r = delete(r,int(input("Enter the value of the node to be deleted: ")))
elif choice == 4:
search(r,int(input("Enter the node value to be searched: ")))
elif choice == 5:
break
```
#### File: python/Binary Tree/LazysegmentTree.py
```python
N=1000001
a=[0]*N
s=[0]*(4*N)
lazy=[0]*(4 *N)
def build(l,r,p):
if l>r:
return None
if l==r:
s[p]=a[l]
return None
m=(l+r)//2
build(l,m,2*p+1)
build(m+1,r,2*p+2)
s[p]=s[2*p+1]+s[2*p+2]
def update(l,r,i,j,p,v):
if lazy[p]!=0:
s[p]+=(r-l+1)*lazy[p]
if l!=r:
lazy[2*p+1]+=lazy[p]
lazy[2*p+2]+=lazy[p]
lazy[p]=0
if l>r or r<i or l>j : #no overlap
return
if (l>=i and r<=j) : #complete overlap
s[p]+=(r-l+1)*v
if l!=r:
lazy[2*p+1]+=v
lazy[2*p+2]+=v
return
m=(l+r)//2
update(l,m,i,j,2*p+1,v)
update(m+1,r,i,j,2*p+2,v)
s[p]=s[2*p+1]+s[2*p+2]
def query(l,r,i,j,p):
if (l>r or r<i or l>j) :
return 0
if lazy[p]!=0 :
s[p]+=(r-l+1)*lazy[p]
if l!=r:
lazy[2*p+1]+=lazy[p]
lazy[2*p+2]+=lazy[p]
lazy[p]=0
if (l>=i and r<=j) :
return s[p]
m=(l+r)//2
ans=query(l,m,i,j,2*p+1)
ans1=query(m+1,r,i,j,2*p+2)
return ans+ans1
def main():
n,c=map(int,input().split())
build(0,n-1,0)
x=y=z=val=0
for i in range(c):
g=list(map(int,input().split()))
x=g[0]
if x==0:
y=g[1]
z=g[2]
val=g[3]
update(0,n-1,y,z,0,val)
else :
y=g[1]
z=g[2]
print(query(0,n-1,y,z,0))
if __name__=='__main__':
t=int(input())
while t>0:
main()
t-=1
```
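Bypassing the stdin-driven `main`, the segment tree functions above can be exercised directly; a small sketch, assuming the module-level arrays `a`, `s` and `lazy` are still in their initial zeroed state:

```python
# Illustrative only: range update and range-sum query on a 5-element array.
n = 5
for idx, v in enumerate([1, 2, 3, 4, 5]):
    a[idx] = v
build(0, n - 1, 0)

print(query(0, n - 1, 0, 4, 0))   # 15, sum of the whole array
update(0, n - 1, 1, 3, 0, 10)     # add 10 to every element at indices 1..3
print(query(0, n - 1, 0, 4, 0))   # 45
```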
#### File: python/distance/HaversineDistanceInMiles.py
```python
import math
def distanceInMilesOrKilos(milesOrKilos,originLat,originLon,destinationLat,destinationLon):
radius = 3959 if milesOrKilos == "miles" else 6371
lat1 = originLat
lat2 = destinationLat
lon1 = originLon
lon2 = destinationLon
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = (math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = radius * c
return distance
```
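For example, calling the function above for two well-known coordinates (the coordinates are rounded, so treat the result as approximate):

```python
# New York City to Los Angeles, great-circle distance.
nyc = (40.7128, -74.0060)
la = (34.0522, -118.2437)

miles = distanceInMilesOrKilos("miles", nyc[0], nyc[1], la[0], la[1])
kilometres = distanceInMilesOrKilos("km", nyc[0], nyc[1], la[0], la[1])
print(round(miles), round(kilometres))  # roughly 2450 miles, about 3940 km
```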
#### File: python/dynamic_programming/Edit_distance.py
```python
def editdistance(str1 , str2):
m = len(str1)
n = len(str2)
    # initialize the DP table with zeros
dp = [[0 for x in range(n+1)] for x in range(m + 1)]
for i in range(m + 1):
for j in range(n+1):
            # if the first string is empty, insert all characters of the second string
if i==0:
dp[i][j] = j
            # if the second string is empty, the only option is to remove all characters of the first string
elif j==0:
dp[i][j] = i
            # if the last characters of both strings match, ignore them and move on
elif str1[i-1] == str2[j-1]:
dp[i][j] = dp[i-1][j-1]
            # if the characters do not match, consider all possibilities: insert, remove and replace
else:
dp[i][j] = 1 + min(dp[i][j-1], # insert
dp[i-1][j], # remove
dp[i-1][j-1]) # replace
return dp[m][n]
str_1 = input()
str_2 = input()
print(editdistance(str_1 , str_2))
```
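A non-interactive check of the function above, using the classic worked example:

```python
# "kitten" -> "sitting" takes 3 edits: substitute k->s, substitute e->i, insert g.
print(editdistance("kitten", "sitting"))  # 3
```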
#### File: python/dynamic_programming/fibonacci_topdown.py
```python
def fibonacci(n,memo):
if n==0 or n==1:
return n
if memo[n]!=0:
return memo[n]
else:
memo[n]=fibonacci(n-1,memo)+fibonacci(n-2,memo)
return memo[n]
if __name__=="__main__":
n = int(input("Enter a whole number\n"));
memo = [0 for i in range(n+1)]
val=fibonacci(n,memo)
print(val)
```
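The same function can be called non-interactively; with this implementation fibonacci(0) is 0 and fibonacci(1) is 1, so:

```python
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, ...
n = 10
print(fibonacci(n, [0] * (n + 1)))  # 55
```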
#### File: python/graph_algorithms/Dijkstra's_Shortest_Path_Implementation_using_Adjacency_List.py
```python
class Node_Distance :
def __init__(self, name, dist) :
self.name = name
self.dist = dist
class Graph :
def __init__(self, node_count) :
self.adjlist = {}
self.node_count = node_count
def Add_Into_Adjlist(self, src, node_dist) :
if src not in self.adjlist :
self.adjlist[src] = []
self.adjlist[src].append(node_dist)
def Dijkstras_Shortest_Path(self, source) :
# Initialize the distance of all the nodes from source to infinity
distance = [999999999999] * self.node_count
# Distance of source node to itself is 0
distance[source] = 0
# Create a dictionary of { node, distance_from_source }
dict_node_length = {source: 0}
while dict_node_length :
# Get the key for the smallest value in the dictionary
# i.e Get the node with the shortest distance from the source
source_node = min(dict_node_length, key = lambda k: dict_node_length[k])
del dict_node_length[source_node]
for node_dist in self.adjlist[source_node] :
adjnode = node_dist.name
length_to_adjnode = node_dist.dist
# Edge relaxation
if distance[adjnode] > distance[source_node] + length_to_adjnode :
distance[adjnode] = distance[source_node] + length_to_adjnode
dict_node_length[adjnode] = distance[adjnode]
for i in range(self.node_count) :
print("Source Node ("+str(source)+") -> Destination Node(" + str(i) + ") : " + str(distance[i]))
def main() :
g = Graph(6)
# Node 0: <1,5> <2,1> <3,4>
g.Add_Into_Adjlist(0, Node_Distance(1, 5))
g.Add_Into_Adjlist(0, Node_Distance(2, 1))
g.Add_Into_Adjlist(0, Node_Distance(3, 4))
# Node 1: <0,5> <2,3> <4,8>
g.Add_Into_Adjlist(1, Node_Distance(0, 5))
g.Add_Into_Adjlist(1, Node_Distance(2, 3))
g.Add_Into_Adjlist(1, Node_Distance(4, 8))
# Node 2: <0,1> <1,3> <3,2> <4,1>
g.Add_Into_Adjlist(2, Node_Distance(0, 1))
g.Add_Into_Adjlist(2, Node_Distance(1, 3))
g.Add_Into_Adjlist(2, Node_Distance(3, 2))
g.Add_Into_Adjlist(2, Node_Distance(4, 1))
# Node 3: <0,4> <2,2> <4,2> <5,1>
g.Add_Into_Adjlist(3, Node_Distance(0, 4))
g.Add_Into_Adjlist(3, Node_Distance(2, 2))
g.Add_Into_Adjlist(3, Node_Distance(4, 2))
g.Add_Into_Adjlist(3, Node_Distance(5, 1))
# Node 4: <1,8> <2,1> <3,2> <5,3>
g.Add_Into_Adjlist(4, Node_Distance(1, 8))
g.Add_Into_Adjlist(4, Node_Distance(2, 1))
g.Add_Into_Adjlist(4, Node_Distance(3, 2))
g.Add_Into_Adjlist(4, Node_Distance(5, 3))
# Node 5: <3,1> <4,3>
g.Add_Into_Adjlist(5, Node_Distance(3, 1))
g.Add_Into_Adjlist(5, Node_Distance(4, 3))
g.Dijkstras_Shortest_Path(0)
print("\n")
g.Dijkstras_Shortest_Path(5)
if __name__ == "__main__" :
main()
```
#### File: python/linked_list/Circular_LinkedList.py
```python
class Node:
def __init__(self, data):
self.data = data
self.next = None
class CircularLinkedList:
def __init__(self):
self.count = 0
self.head = Node(None)
self.tail = Node(None)
self.head.next = self.tail
self.tail.next = self.head
def size(self):
return self.count
def add(self, data):
newNode = Node(data)
if self.head.data is None:
self.head = newNode
self.tail = newNode
newNode.next = self.head
else:
self.tail.next = newNode
self.tail = newNode
self.tail.next = self.head
self.count += 1
def printList(self):
temp = self.head
if self.head != None:
while True:
print(temp.data, end=" ")
temp = temp.next
if temp == self.head:
break
if __name__ == "__main__":
cll = CircularLinkedList()
cll.add(1)
cll.add(-2)
cll.add(40)
cll.add(-101)
cll.add(220)
cll.add(13)
cll.printList()
print(cll.size())
```
#### File: python/maths/gcd.py
```python
def gcd(num1,num2):
if(num2==0):
return num1
else:
return gcd(num2,num1%num2)
num1 = int(input("Enter an Integer: "))
num2 = int(input("Enter another Integer: "))
print ("GCD: ",gcd(num1,num2))
```
#### File: python/Matrix/Bullet-Brick.py
```python
def MinBullet(points):
# Sort the points in ascending order
for i in range(len(points)):
points[i] = points[i][::-1]
points = sorted(points)
for i in range(len(points)):
points[i] = points[i][::-1]
# Check if there are no points
if (len(points) == 0):
return 0
cnt = 1
curr = points[0][1]
# Iterate through all the points
for j in range(1, len(points)):
if (curr < points[j][0]):
# Increase the count
cnt += 1
curr = points[j][1]
# Return the count
return cnt
# sample brick
if __name__ == '__main__':
bricks = [ [ 10, 16 ],
[ 2, 8 ],
[ 1, 6 ],
[ 7, 12 ]]
# Function call
print(MinBullet(bricks))
```
#### File: python/searching/jumpsearch.py
```python
import math #for sqrt
def jumpsearch(arr,ele): #function to perform jump search
n = len(arr) #Length of the list/array
block = int(math.sqrt(n))#Defining size of the block(no of elements to be skipped)
start=0
end = 0
while start < n and arr[start] <= ele:
end = min(n - 1, start + block)#Taking the minimum of the length and start
if arr[start] <= ele and arr[end] >= ele: #If block in which element is present is found then break
break
start += block #Keep incrementing start with the value of block size unti block is found
if start >= n or arr[start] > ele: #If element is not found
return -1
end = min(n- 1, end)#Taking the minimum of the length and end
i = start
while i <= end and arr[i] <=ele:#Search for the element in the respective block
if arr[i] == ele:
return i
i += 1
return -1
def main():#Driver function
arr = [1,2,3,4,5,6,7,8,9] #Input array/list
pos = jumpsearch(arr,5)
if(pos>=0):
print("Element located at pos: ",pos+1)
else:
print("Element not located")
main()
```
#### File: python/string algorithms/boyer_moore_sunday.py
```python
def search_letter_in_pattern(pattern,letter):
"""
Search the letter inside in pattern if no converge return -1
"""
m = len(pattern)
count = 1
for i in range(m-1,-1,-1):
if letter==pattern[i]:
return count
count = count+1
return -1
def boyern_sunday(text,pattern):
"""
text:str
pattern:str
"""
coincidence = 0
n = len(text)
m = len(pattern)
j = m-1
i = m-1
iterations = 0
while(i<=n-1):
if j==m-1:
h = i #the rightmost value
if text[i]==pattern[j]:
if j==0:
coincidence = coincidence+1
j = m-1
i = (i+m-1)+j
iterations = iterations+1
else:
i = i-1
j = j-1
else:
#If it enters here it is because the last one no longer converges and there is no need to compare more
if h==n-1:
return coincidence
j = m-1
l = search_letter_in_pattern(pattern,text[h+1])
if l<0:
#the next move is the pattern quantity +1
i = h+m+1
iterations = iterations+1
else:
i = h+l
iterations = iterations+1
return coincidence
print(boyern_sunday("jairo jandresja","ja"))
``` |
{
"source": "jigneshpshah/organic_shop",
"score": 2
} |
#### File: organic_shop/utils/frappe.py
```python
import frappe
import datetime
from frappe.utils import cstr
import frappe.handler
@frappe.whitelist()
def upload_file():
# Upload file
file = frappe.handler.uploadfile()
return file
```
#### File: www/organic-cart/index.py
```python
import frappe
import json
from erpnext.shopping_cart.cart import get_cart_quotation
# def get_context(context):
# context.update(get_cart_quotation())
def get_context(context):
# if frappe.session.user == 'Guest':
# frappe.local.flags.redirect_location = '/'
# raise frappe.Redirect
context.session = frappe.session
context.user = frappe.session.user
context.csrf_token = frappe.sessions.get_csrf_token()
context.item_group = frappe.get_all("Item Group",filters={"show_in_website":1,"is_group":0},fields=["name"])
# context.item = frappe.get_all("Item",filters={"show_in_website":1},fields=["name","image","item_group"])
context.item_result = get_items()
context.update(get_cart_quotation())
return context
def get_items():
result_items = []
items = frappe.db.sql(""" select name,item_name,route,has_variants,item_group,website_warehouse,image from `tabItem` it where it.show_in_website = 1 and it.is_sales_item = 1""",as_dict = 1)
for item in items:
if item.has_variants == 1:
variant = frappe.db.sql("""select name,item_name,item_group,website_warehouse from `tabItem` it where it.variant_of = %s""",item.name,as_dict = 1)
variant_list = []
in_stock = 0
for var in variant:
price = frappe.db.get_value("Item Price",{"item_code":var.name,"price_list":frappe.db.get_value("Shopping Cart Settings",None,"price_list"),"selling":1},"price_list_rate")
stock = frappe.db.get_value("Bin",{"item_code":var.name,"warehouse":var.website_warehouse},"actual_qty")
if stock != None and stock != 0:
in_stock=1
variant_list.append({
"variant_name":var.item_name,
"variant":var.name,
"stock":stock,
"price":price,
})
result_items.append({
"route":item.route,
"has_variant":item.has_variants,
"item":item.name,
"item_name":item.item_name,
"image":item.image,
"variant":variant_list,
"item_group":item.item_group,
"in_stock":in_stock
})
else:
in_stock = 0
price = frappe.db.get_value("Item Price",{"item_code":item.name,"price_list":frappe.db.get_value("Shopping Cart Settings",None,"price_list"),"selling":1},"price_list_rate")
stock = frappe.db.get_value("Bin",{"item_code":item.name,"warehouse":item.website_warehouse},"actual_qty")
if stock != None and stock != 0:
in_stock=1
result_items.append({
"route":item.route,
"has_variant":item.has_variants,
"item":item.name,
"item_name":item.item_name,
"image":item.image,
"variant":None,
"stock":stock,
"price":price,
"item_group":item.item_group,
"in_stock":in_stock
})
return result_items
``` |
{
"source": "jigneshpurohit/openshift_playbook",
"score": 2
} |
#### File: openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
```python
import re
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
# pylint: disable=too-many-branches,too-many-statements,too-many-arguments
def run(self, terms, variables=None, zones_enabled=True, short_version=None,
deployment_type=None, **kwargs):
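        """Return the default scheduler priority list for the detected origin/openshift-enterprise version, optionally appending a zone anti-affinity priority."""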
priorities = []
if short_version is None or deployment_type is None:
if 'openshift' not in variables:
raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
if deployment_type is None:
if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
raise AnsibleError("This lookup module requires that the deployment_type be set")
deployment_type = variables['openshift']['common']['deployment_type']
if short_version is None:
if 'short_version' in variables['openshift']['common']:
short_version = variables['openshift']['common']['short_version']
elif 'openshift_release' in variables:
release = variables['openshift_release']
if release.startswith('v'):
short_version = release[1:]
else:
short_version = release
short_version = '.'.join(short_version.split('.')[0:2])
elif 'openshift_version' in variables:
version = variables['openshift_version']
short_version = '.'.join(version.split('.')[0:2])
else:
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
if deployment_type == 'openshift-enterprise':
# convert short_version to origin short_version
short_version = re.sub('^3.', '1.', short_version)
if short_version == 'latest':
short_version = '1.6'
if short_version == '1.1':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1}
])
if short_version == '1.2':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodeAffinityPriority', 'weight': 1}
])
if short_version == '1.3':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1}
])
if short_version == '1.4':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1}
])
if short_version in ['1.5', '1.6']:
priorities.extend([
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1},
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1}
])
if zones_enabled:
zone_priority = {
'name': 'Zone',
'argument': {
'serviceAntiAffinity': {
'label': 'zone'
}
},
'weight': 2
}
priorities.append(zone_priority)
return priorities
``` |
{
"source": "jigneshvasoya/tlspsk-wrapper",
"score": 3
} |
#### File: tlspsk-wrapper/pytlspsk/ssl_psk.py
```python
import ssl
import _ssl_psk
# memory leak!
_sslptr_to_psk = {}
def _psk_callback(ssl):
return _sslptr_to_psk[ssl]
_ssl_psk.set_python_psk_callback(_psk_callback)
def set_client_psk(ssl, psk):
ptr = _ssl_psk.set_psk_callback(ssl._sslobj)
_sslptr_to_psk[ptr] = psk
def set_server_psk(ssl, psk):
ptr = _ssl_psk.set_psk_server_callback(ssl._sslobj)
_sslptr_to_psk[ptr] = psk
def wrap_socket(*args, **kwargs):
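    """Like ssl.wrap_socket, but accepts an extra psk= keyword: the key is registered with the client or server PSK callback before the (deferred) handshake runs."""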
psk = kwargs.setdefault('psk', None)
del kwargs['psk']
do_handshake_on_connect = kwargs.get('do_handshake_on_connect', True)
kwargs['do_handshake_on_connect'] = False
kwargs.setdefault('server_side', False)
server_side = kwargs['server_side']
if psk:
del kwargs['server_side'] # bypass need for cert
sock = ssl.wrap_socket(*args, **kwargs)
if psk:
if server_side:
set_server_psk(sock, psk)
else:
set_client_psk(sock, psk)
if do_handshake_on_connect:
sock.do_handshake()
return sock
``` |
{
"source": "jignyasi/adhyapiyutam",
"score": 3
} |
#### File: jignyasi/adhyapiyutam/basics.py
```python
print 'hello'
a = 'hello'
# C-style braces are not valid Python; an if statement uses a colon and indentation:
# if a == b:
#     do this
if a==a:
print '1'
print '2'
print 'something'
if True:
print '1'
if True:
print '2'
if "True":
print '3'
name = 'anand'
age = 28
loc = 'hyd'
print 'hi, my name is {0} and my age is {1} and my loc is {2} and my sal is {1}L'.format(name,age,loc)
print float(2) / 3
print 17 // 3
print 17 % 3
print 2 ** 6
print pow(2,5)
print not (True and True)
print not (True or False)
print not (True | False)
print not (True & True)
print 3!=3
a = 3
a+=1
print a
a*=2
print a
if a<10:
print 'level-0 {0}'.format(a)
elif a < 20:
print 'level-1 {0}'.format(a)
else:
print 'level-2 {0}'.format(a)
flag = 0
while flag <5:
print flag
flag+=1
for flag in range(5):
print 'something'
if flag ==2:
continue
print flag
if flag == 4:
break
def myfun():
print 'hello world!'
return None
myfun()
def myfun(x):
return 'hello world!{0}'.format(x)
print myfun()
print myfun('BISCUIT')
def myfun(x = 'BAZINGA'):
return 'hello world!{0}'.format(x)
print myfun()
print myfun('BISCUIT')
# Local variable scope
x = 10
def myfun(y):
x =2*y
return 'hello world!{0}'.format(x)
print myfun(x)
print(x)
# global variable scope
x = 40
def myfun(y):
global x
x =2*y
return 'hello world!{0}'.format(x)
print myfun(x)
print(x)
# kwargs example -- numbers
def myfun(*numbers):
print numbers
return sum(numbers)
print myfun(2,3,4,5,6, 6.3)
def myfun(**vars):
print vars
return vars['x'] + vars['y']
print myfun(x = 2, y = 10, z = 100, h = 200)
def myfun(x,y, *nums, **kwargs):
print x,y
print nums, kwargs
return x+y+kwargs['z'] + sum(nums)
print myfun(2,4,6,8,z = 100, h = 200)
#sys.path.insert(fileWD)
import sampleProg1
sampleProg1.fnCopy('choc', 'biscuit')
from sampleProg1 import fnCopy1, fnCopy2
fnCopy1('choc', 'biscuit')
from sampleProg1 import *
fnCopy2('choc', 'biscuit')
import sampleProg1
dir(sampleProg1)
from myPackage1 import sampleProg1
sampleProg1.fnCopy1('source1', 'destination1')
x = 2
print type(x)
x = 2.2
print type(x)
x = '2.2'
print type(x)
x = 'abcdefghijklmno'
print type(x)
print x[0:3]
print x[:-1]
print x[::-1]
print x[::2]
print dir(x)
print len(x)
print x.isupper()
print x.upper()
print x.replace('k','K')
print x + '123'
x = ['a',2, 'b',3 ,'c']
print type(x)
print len(x)
print x[0:3]
print x[-1]
print x[:-1]
print x[::2]
print x + list('xyz')
print x + ['1','2','22','33']
print dir(x)
x.sort()
print(x)
x.append('yellow')
print(x)
x.append(['blue', 'red'])
print(x)
x.extend(['pink', 'green'])
print(x)
x.insert(2, 'MENTAL')
print x
x.reverse()
print x
x.pop(3)
print x
x.remove('a')
print x
y = x.pop(1)
print x,y
print sum([0,1,2])
print sum([True, False, True])
x = ['a','a','b','b','b','c']
y = ['b','c','c','c','d']
print set(x) - set(y)
print set(x).union(set(y))
print set(x).intersection(set(y))
print list(set(x).union(set(y)))
x = ['a',2, 'b',3 ,'c']
#M1
y = []
for k in x:
y.append(str(k)+'L')
print y
#M2
y = [str(k)+'L' for k in x]
#M3
y = map(lambda k: str(k)+'L',x)
#########
filter(lambda k: k>5, range(10))
reduce(lambda a,b: a+b, range(10))
k = []
reduce(lambda a,b: k.append([a,b]), [22,1,2,44,7,3,8])
print k
####Vectorized operations
y = ['1','2','22','33']
x = '_'.join(y)
print x
print x.split('_')
y = ['1',2,'22',33,'abc']
print '+'.join(map(str, y))
print [2,3] #this is a list
print (2,3) #this is a tuple
k = [2,3]
k.append(4)
print k
x = (2,3,4,5,6,7) #immutable
print type(x)
print len(x)
print x[0:3]
print dir(x)
#print x.count(4)
print x.index(4)
x = [('username','<PASSWORD>'),('username1','<PASSWORD>')]
print x
print map(lambda k:
'+'.join(k),
x)
def myJoinfunc(l):
return '+'.join(l)
print map(myJoinfunc, x)
print (('username','<PASSWORD>'),('username1','<PASSWORD>'))
a,b = 2,3
print a,b
a,b,_ = 2,3,4
print a,b
a,b = ('AB','CD')
print a,b
x = [('username','password1'),('username1','<PASSWORD>')]
def myJoinfunc(l):
a,b = l
return '+'+a+'::'+b
print map(myJoinfunc, x)
x = (('username','<PASSWORD>'),('username1','<PASSWORD>'))
print map(myJoinfunc, x)
print (2,3) # length 2
print (2) #will not be a tuple
print (2,) # length 1
[]#Search is index based
()#Search is index based
{}#Search is key based
x = {'username':'password','username1':'password1','username2':'password2'}
print type(x)
print len(x)
print x['username1']
print x.keys()
print x.values()
for k in x:
print [k,x[k]]
print [[k,x[k]] for k in x]
x['username3'] = 'password3' #adding a new user
print x
print dir(x)
for usr,pasw in x.items():
print usr+pasw
x.pop('username3')
print x
del x['username2']
print x
print x['username1']
x['username1'] = 'MYPASSWORD'
print x['username1']
x = {'username':['password',22,'M','HYD'],
'username1':'password1',
'username2':('password2','M'),
'username3': {'LOC':'HYD','Age':28,'PASSWORD':'<PASSWORD>'}}
print x
print x['username']
print x['username'][3]
print x['username3']
print x['username3']['LOC']
a = list('abcde')
b = a
a.remove('c')
print a,b
a = list('abcde')
b = a[:] ##Copy
a.remove('c')
print a,b
a = zip(list('abcde'),list('fghij'))
a = dict(a)
b = a
a.pop('c')
print a,b
a = zip(list('abcde'),list('fghij'))
a = dict(a)
b = a.copy()
a.pop('c')
print a,b
import re
rex = re.compile('\(.*?\)')
A = 'acf () (hihhh) (ashd|} dashfj'
print re.sub(rex, '', A)
rex = re.compile('\(.*?\)|\[.*?\]')
A = 'acf () (hihhh) (ashd|} dashfj p[ascsf]'
print re.sub(rex, '', A)
rex = re.compile('\(.*?\)|\[.*?\]')
rex = re.compile('\(\w+?\)')
print re.sub(rex, '', A)
``` |
{
"source": "JIGNYAS/Networcked",
"score": 3
} |
#### File: Networcked/src/apache_server.py
```python
import os
import time
import webbrowser
from subprocess import PIPE, run
from logo import logo_print
logo_print()
def command(cmd):
return run(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
# a=input(f'{green}Enter Command:{NC}').split()
def start_server(a):
if(a[0]=="netw"):
if "--se" in a:
t="sudo service apache2 start -D "+a[2]
x=command(t)
webbrowser.open("http://localhost/")
if '--file' in a:
command('sudo rm /var/www/html/index.nginx-debian.html')
print()
os.system("echo '\033[0;31m [Starting] \033[0m Loading The Server...'")
print()
time.sleep(1)
os.system("echo '\033[0;31m [Initializing] \033[0m Starting The Processes....'")
time.sleep(1)
print()
os.system("echo '\033[0;31m [Loading The Host] \033[0m Copying Pages.....'")
print()
time.sleep(1)
os.system("echo '\033[0;32m [All Set !!] \033[0m Server Deployed At http://localhost...'")
print()
time.sleep(1)
t='sudo cp '+a[2]+' /var/www/html/src/cindex.html'
command('mkdir /var/www/html/src')
x=command(t)
#print(t)
webbrowser.open("http://localhost/src/index.html")
```
#### File: Networcked/src/directory_finder.py
```python
import os
import time
import requests
RED='\033[0;31m'
NC='\033[0m'
green='\033[0;32m'
cyan='\033[0;36m'
orange='\033[0;33m'
purple='\033[0;35m'
def Direc_find(a):
t=open(a[3],"r")
for i in t:
        x=i.strip()
        req=requests.get(a[2]+"/"+x)
        if req.status_code<300:
            print(green+str(req.status_code)+NC+" "+a[2]+"/"+x)
        else:
            print(RED+str(req.status_code)+NC+" "+a[2]+"/"+x)
``` |
{
"source": "jigo2600/jigo2600",
"score": 2
} |
#### File: jigo2600/jigo2600/setup.py
```python
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import sys
import setuptools
import distutils
__version__ = '1.0.0'
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked."""
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
class BuildExt(build_ext):
"A custom build extension for adding compiler-specific options."
def build_extensions(self):
# Get the default compiler options for extensions.
c_opts = []
# Platform-dependent options.
pl = sys.platform
if pl == 'darwin':
pass
elif pl == 'nt':
pass
# Compiler-dependent options.
ct = self.compiler.compiler_type
if ct == 'unix':
c_opts += [f'-DVERSION_INFO="{self.distribution.get_version()}"']
c_opts += ['-fvisibility=hidden']
c_opts += ['-std=c++14']
elif ct == 'msvc':
c_opts += [f'/DVERSION_INFO="{self.distribution.get_version()}"']
c_opts += ['/EHsc']
# Set the options for each target.
for ext in self.extensions:
ext.extra_compile_args = c_opts
# Build the extension.
build_ext.build_extensions(self)
setup(
name='jigo2600',
version=__version__,
author='Jigo2600 Team',
author_email='<EMAIL>',
url='https://github.com/jigo2600/jigo2600',
description='An Atari 2600 emulator',
long_description=open('README.md').read(),
packages=['jigo2600'],
package_dir={'': 'python'},
package_data={'jigo2600': ['gamecontrollerdb.txt', 'cartridges.json']},
ext_modules=[
Extension(
'jigo2600.core',
[
'python/jigo2600/core.cpp',
'src/Atari2600.cpp',
'src/Atari2600Cartridge.cpp',
'src/M6502.cpp',
'src/M6502Disassembler.cpp',
'src/M6532.cpp',
'src/TIA.cpp',
'src/TIASound.cpp',
],
include_dirs=[
'src/',
get_pybind_include(),
get_pybind_include(user=True)
],
language='c++'
),
],
install_requires=['pybind11>=2.2'],
cmdclass={'build_ext': BuildExt},
zip_safe=False,
)
``` |
{
"source": "jigpu/radalert-py",
"score": 3
} |
#### File: examples/log/logger.py
```python
import sys
import threading
import urllib.request
from datetime import datetime
from string import Template
from radalert.ble import RadAlertLEStatus
from radalert.ble import RadAlertLEQuery
from radalert.util.filter import FIRFilter
from radalert.util.filter import IIRFilter
class ConsoleLogger:
"""
Simple console-logging class for the Radiation Alert devices.
Periodically prints the properties tracked by the backend to the
console.
"""
def __init__(self, backend, delay=30):
self.backend = backend
self.delay = delay
self._running = False
self._thread_event = threading.Event()
def __str__(self):
try:
update_delay = datetime.now() - self.backend.last_update
if update_delay.total_seconds() > self.delay:
return ""
actual = self.backend.actuals.value
avg_short = self.backend.averages[0].value * 60
avg_medium = self.backend.averages[1].value * 60
avg_long = self.backend.averages[2].value * 60
maximum = self.backend.maximum.value * 60
minimum = self.backend.minimum.value * 60
table = (
f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}",
f"{self.backend.battery}%",
f"{self.backend.conversion}",
f"{actual}",
f"{avg_short:.2f}",
f"{avg_medium:.2f}",
f"{avg_long:.2f}",
f"{maximum:.2f}",
f"{minimum:.2f}",
)
return "\t".join(table)
except:
return ""
def header(self):
def timespan(time):
if time <= 60:
return (time, "s")
time /= 60
if time <= 60:
return (time, "m")
time /= 60
if time <= 24:
return (time, "h")
time /= 24
return (time, "d")
ts_actual = timespan(self.backend.actual_samples)
ts_short = timespan(self.backend.average_samples[0])
ts_medium = timespan(self.backend.average_samples[1])
ts_long = timespan(self.backend.average_samples[2])
ts_minmax = timespan(self.backend.minmax_samples)
table = (
f"time",
f"battery",
f"cpm/(mR/h)",
f"{ts_actual[0]}{ts_actual[1]}-count",
f"{ts_short[0]}{ts_short[1]}-avg-cpm",
f"{ts_medium[0]}{ts_medium[1]}-avg-cpm",
f"{ts_long[0]}{ts_long[1]}-avg-cpm",
f"{ts_minmax[0]}{ts_minmax[1]}-max-cpm",
f"{ts_minmax[0]}{ts_minmax[1]}-min-cpm",
)
return "\t".join(table)
def spin(self):
"""
Spin our wheels periodically logging to the console.
        This should be executed in a separate thread to ensure that
execution can still continue.
"""
if not self._running:
print(self.header())
self._running = True
while self._running:
line = self.__str__()
if len(line) > 0:
print(line)
self._thread_event.wait(timeout=self.delay)
def stop(self):
"""Stop execution of the spin function."""
self._running = False
self._thread_event.set()
class GmcmapLogger:
"""
Simple class to take care of logging data to the GMC.MAP service.
"""
_URL_TEMPLATE=Template("http://www.GMCmap.com/log2.asp?AID=${GMC_ACCOUNT_ID}&GID=${GMC_GEIGER_ID}&CPM=${CPM}&ACPM=${ACPM}&uSV=${USV}")
def __init__(self, backend, account_id, geiger_id, delay=180):
self.backend = backend
self.account_id = account_id
self.geiger_id = geiger_id
self.delay = delay
self._running = False
self._thread_event = threading.Event()
def send_update(self):
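        """Send the current short/long CPM averages and a derived uSv/h value to GMC.MAP, skipping the upload if the backend data is stale."""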
try:
update_delay = datetime.now() - self.backend.last_update
if update_delay.total_seconds() > self.delay:
return
avg_short = self.backend.averages[0].value * 60
avg_long = self.backend.averages[2].value * 60
usv = avg_short / self.backend.conversion * 10
self.send_values(avg_short, avg_long, usv)
except:
print("Unable to send values to gmc server", file=sys.stderr)
def send_values(self, cpm, acpm, usv):
"""
Send the log data to the service.
"""
url=GmcmapLogger._URL_TEMPLATE.substitute(
GMC_ACCOUNT_ID=self.account_id,
GMC_GEIGER_ID=self.geiger_id,
CPM=f'{cpm:.2f}',
ACPM=f'{acpm:.2f}',
USV=f'{usv:.5f}'
)
urllib.request.urlopen(url).read()
def spin(self):
"""
Spin our wheels periodically logging to the server.
        This should be executed in a separate thread to ensure that
execution can still continue.
"""
if not self._running:
self._running = True
while self._running:
self.send_update()
self._thread_event.wait(timeout=self.delay)
def stop(self):
"""Stop execution of the spin function."""
self._running = False
self._thread_event.set()
class RadmonLogger:
"""
Simple class to take care of logging data to the Radmon service.
"""
_URL_TEMPLATE=Template("http://radmon.org/radmon.php?function=submit&user=${RADMON_USERNAME}&password=${<PASSWORD>}&value=${CPM}&unit=CPM")
def __init__(self, backend, account_id, geiger_id, delay=180):
self.backend = backend
self.account_id = account_id
self.geiger_id = geiger_id
self.delay = delay
self._running = False
self._thread_event = threading.Event()
def send_update(self):
try:
update_delay = datetime.now() - self.backend.last_update
if update_delay.total_seconds() > self.delay:
return
avg_short = self.backend.averages[0].value * 60
self.send_values(avg_short)
except:
print("Unable to send values to radmon server", file=sys.stderr)
def send_values(self, cpm):
"""
Send the log data to the service.
"""
url=RadmonLogger._URL_TEMPLATE.substitute(
RADMON_USERNAME=self.account_id,
RADMON_PASSWORD=self.geiger_id,
CPM=f'{cpm:.2f}',
)
urllib.request.urlopen(url).read()
def spin(self):
"""
Spin our wheels periodically logging to the server.
        This should be executed in a separate thread to ensure that
execution can still continue.
"""
if not self._running:
self._running = True
while self._running:
self.send_update()
self._thread_event.wait(timeout=self.delay)
def stop(self):
"""Stop execution of the spin function."""
self._running = False
self._thread_event.set()
class LogBackend:
"""
Backend class to interface with the radalert package.
Keeps track of various statistics and device state for loggers.
"""
def __init__(self, actual_samples=60, average_samples=(300,43200,7776000), minmax_samples=300):
"""
Create a new logger with the given properties.
"""
self.last_update = None
self.conversion = None
self.battery = 0
self.actual_samples = actual_samples
self.actuals = FIRFilter(actual_samples, sum)
self.average_samples = average_samples
self.averages = (
FIRFilter(average_samples[0]),
IIRFilter.create_from_time_constant(average_samples[1]),
IIRFilter.create_from_time_constant(average_samples[2]),
)
self.minmax_samples = minmax_samples
self.maximum = FIRFilter(minmax_samples, max)
self.minimum = FIRFilter(minmax_samples, min)
def radalert_le_callback(self, data):
"""
Update internal state whenever a RadAlertLE has new data.
This is a callback that should be given to the RadAlertLE object
so that we can be informed whenever new data is available.
"""
if isinstance(data, RadAlertLEStatus):
self._on_data(data)
elif isinstance(data, RadAlertLEQuery):
self._on_query(data)
def _on_data(self, data):
self.last_update=datetime.now()
cps = data.cps
self.battery = data.battery_percent
        # Do not initialize the actual count with any kind of average
self.actuals.iterate(cps)
# Initialize averaging filters to the device average
if self.averages[0].value is None:
self.averages[0].iterate(data.cpm / 60)
if self.averages[1].value is None:
self.averages[1].iterate(data.cpm / 60)
if self.averages[2].value is None:
self.averages[2].iterate(data.cpm / 60)
self.averages[0].iterate(cps)
self.averages[1].iterate(cps)
self.averages[2].iterate(cps)
# Initialize the minmax filters to the device average
if len(self.maximum.values) == 0:
self.maximum.iterate(data.cpm / 60)
        if len(self.minimum.values) == 0:
            self.minimum.iterate(data.cpm / 60)
self.maximum.iterate(self.averages[0].value)
self.minimum.iterate(self.averages[0].value)
def _on_query(self, data):
self.conversion = data.conversion_factor
``` |
{
"source": "jigpu/sshlib",
"score": 3
} |
#### File: resources/asyncssh-server/server.py
```python
import asyncio, asyncssh, crypt, sys, time, logging
passwords = {
'<PASSWORD>': '<PASSWORD>' # password of '<PASSWORD>'
}
def handle_client(process):
process.stdout.write('success\n')
time.sleep(10)
process.exit(0)
class MySSHServer(asyncssh.SSHServer):
def __init__(self):
self._conn = None
def connection_made(self, conn):
print('SSH connection received from %s.' %
conn.get_extra_info('peername')[0])
self._conn = conn;
def connection_lost(self, exc):
if exc:
print('SSH connection error: ' + str(exc), file=sys.stderr)
else:
print('SSH connection closed.')
def begin_auth(self, username):
# If the user's password is the empty string, no auth is required
self._conn.set_authorized_keys('authorized_keys')
return passwords.get(username) != ''
def password_auth_supported(self):
return True
def validate_password(self, username, password):
pw = passwords.get(username, '*')
return crypt.crypt(password, pw) == pw
def public_key_auth_supported(self):
return True
async def start_server():
asyncssh.set_log_level('DEBUG')
asyncssh.set_debug_level(2)
await asyncssh.create_server(MySSHServer, '', 8022,
server_host_keys=[
'/etc/ssh/ssh_host_ecdsa_key',
'/etc/ssh/ssh_host_rsa_key',
],
process_factory=handle_client)
print("SETTING LOGGER")
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
print("STARTING UP")
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(start_server())
except (OSError, asyncssh.Error) as exc:
sys.exit('Error starting server: ' + str(exc))
print("LISTENER READY")
# Only run the loop once for testing
#loop.call_soon(loop.stop)
loop.run_forever()
``` |
{
"source": "Jigsaw111/nondice",
"score": 2
} |
#### File: plugins/char_make/__init__.py
```python
from nonebot import on_command, CommandSession
from .calculator import Calculator
__plugin_name__ = '人物作成'
__plugin_usage__ = (
    'sg 变量之轮(轮回游戏二版)人物作成\n'
    'sk 时空之轮人物作成\n'
    'coc COC7版人物作成(不支持6版)\n'
    'dnd DND人物作成'
)
@on_command('sg', aliases=('轮回游戏',),only_to_me=False)
async def sg(session: CommandSession):
try:round_num=int(session.current_arg_text.strip())
except:round_num=1
    if not round_num or round_num>10:
        await session.send('非法轮数')
        return
message=''
for round in range(round_num):
if round:message+='\n'
attr_name=['壮硕值','爆发力','协调性','精神力','反应力','幸运值']
attr_cmd=['2d3','2d3','2d3','2d3','2d3','2d3']
for i in range(len(attr_name)):
attr_cal=Calculator(attr_cmd[i])
attr_cal.calculate_with_bracket()
if i:message+=','
message+=attr_name[i]+':'+str(int(attr_cal.result))
await session.send(message)
@on_command('sk', aliases=('时空之轮',),only_to_me=False)
async def sk(session: CommandSession):
try:round_num=int(session.current_arg_text.strip())
except:round_num=1
    if not round_num or round_num>10:
        await session.send('非法轮数')
        return
message=''
for round in range(round_num):
if round:message+='\n'
attr_name=['力量','敏捷','体力','精神','智慧','魅力','幸运']
attr_cmd=['2d5','2d5','2d5','2d5','2d5','2d5','2d5']
for i in range(len(attr_name)):
attr_cal=Calculator(attr_cmd[i])
attr_cal.calculate_with_bracket()
if i:message+=','
message+=attr_name[i]+':'+str(int(attr_cal.result))
await session.send(message)
@on_command('coc', aliases=('克苏鲁的呼唤',),only_to_me=False)
async def coc(session: CommandSession):
try:round_num=int(session.current_arg_text.strip())
except:round_num=1
    if not round_num or round_num>10:
        await session.send('非法轮数')
        return
message=''
for round in range(round_num):
if round:message+='\n'
attr_name=["力量", "体质", "体型", "敏捷", "外貌", "智力", "意志", "教育", "幸运"]
attr_cmd=['3d6*5','3d6*5','(2d6+6)*5','3d6*5','3d6*5','(2d6+6)*5','3d6*5','(2d6+6)*5','3d6*5']
attr_count=0
for i in range(len(attr_name)):
attr_cal=Calculator(attr_cmd[i])
attr_cal.calculate_with_bracket()
attr_count+=attr_cal.result
if i:message+=','
message+=attr_name[i]+':'+str(int(attr_cal.result))
message +="\n总计:"+str(int(attr_count-attr_cal.result))+'/'+str(int(attr_count))
await session.send(message)
@on_command('dnd', aliases=('龙与地下城',),only_to_me=False)
async def dnd(session: CommandSession):
try:round_num=int(session.current_arg_text.strip())
except:round_num=1
    if not round_num or round_num>10:
        await session.send('非法轮数')
        return
message=''
for round in range(round_num):
if round:message+='\n'
attr_name=["力量", "体质", "敏捷", "智力", "感知", "魅力"]
attr_cmd=['4d6k3','4d6k3','4d6k3','4d6k3','4d6k3','4d6k3']
for i in range(len(attr_name)):
attr_cal=Calculator(attr_cmd[i])
attr_cal.calculate_with_bracket()
if i:message+=','
message+=attr_name[i]+':'+str(int(attr_cal.result))
await session.send(message)
```
#### File: plugins/draw_deck/decks.py
```python
import os
import json
import yaml
import random
import re
from .calculator import Calculator
def get_deck_list():
deck_list_json=load_decks()
message='读取到以下牌堆:'
for i in deck_list_json.keys():
if i[0]=='_':continue
message+='\n'+i
return message
def load_decks():
deck_list_data={}
deck_list = os.listdir(os.path.join(os.path.dirname(__file__),'decks'))
for i in deck_list:
f=open(os.path.join(os.path.dirname(__file__),'decks',i),'r',encoding='utf-8')
if i[len(i)-1]=='n':deck_list_data.update(json.loads(f.read()))
        else:deck_list_data.update(yaml.safe_load(f.read()))
f.close()
return deck_list_data
def get_value(key,num=1,mode=False):
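    """Draw `num` entries from the deck named `key`; mode=True draws without replacement. `{sub_deck}` references and `[dice expressions]` inside an entry are expanded recursively."""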
value_list=load_decks()[key]
message=''
for i in range(num):
if mode:
value=value_list.pop(random.randint(0,len(value_list)-1))
else:
value=value_list[random.randint(0,len(value_list)-1)]
while '{' in value:
value=get_sub_key(value)
while '[' in value:
value=get_calculator(value)
if i:message+='\n'
message+=value
return message
def get_sub_key(value):
mode=False
l=value[:value.index('}')]
r=value[value.index('}')+1:]
key_sub=l[l.rindex('{')+1:]
l=l[:l.rindex('{')]
if key_sub[0]=='%':
mode=True
key_sub=key_sub[1:]
return l+get_value(key_sub,1,mode)+r
def get_calculator(value):
l=value[:value.index(']')]
r=value[value.index(']')+1:]
key_sub=l[l.rindex('[')+1:]
l=l[:l.rindex('[')]
cal=Calculator(key_sub)
cal.calculate_with_bracket()
return l+str(int(cal.result))+r
def separate_deckname_and_num(args,mode=False):
args=args.split()
deckname=args[0]
try:num=int(args[1])
except:num=1
return '抽到了:\n'+get_value(deckname,num,mode)
# 调试用
if __name__=='__main__':
print(separate_deckname_and_num(input()))
```
#### File: plugins/draw_deck/__init__.py
```python
from nonebot import on_command, CommandSession
from nonebot.command import call_command
from .decks import get_deck_list,separate_deckname_and_num
__plugin_name__ = '拓展牌堆'
__plugin_usage__ = (
    'draw list 牌堆列表\n'
    'draw deckname 抽取牌堆'
)
@on_command('draw',only_to_me=False)
async def draw(session: CommandSession):
args=session.current_arg_text.strip()
if args=='list':await session.send(get_deck_list())
else:
await session.send(separate_deckname_and_num(args))
@on_command('ndraw',only_to_me=False)
async def ndraw(session: CommandSession):
args=session.current_arg_text.strip()
if args=='list':await session.send(get_deck_list())
else:
await session.send(separate_deckname_and_num(args,True))
@on_command('name',only_to_me=False)
async def name(session:CommandSession):
try:
await call_command(session.bot,session.event,'draw',current_arg='_name_ '+str(int(session.current_arg_text.strip())))
except:
await call_command(session.bot,session.event,'draw',current_arg='_name_'+session.current_arg_text.strip())
@on_command('ti',only_to_me=False)
async def ti(session:CommandSession):
await call_command(session.bot,session.event,'draw',current_arg='_即时症状')
@on_command('li',only_to_me=False)
async def li(session:CommandSession):
await call_command(session.bot,session.event,'draw',current_arg='_总结症状')
```
#### File: src/plugins/help.py
```python
from nonebot import on_command, CommandSession,get_loaded_plugins
@on_command('help', aliases=['使用说明','帮助'],only_to_me=False)
async def _(session: CommandSession):
# 获取设置了名称的插件列表
plugins = list(filter(lambda p: p.name,get_loaded_plugins()))
arg = session.current_arg_text.strip().lower()
if not arg:
# 如果用户没有发送参数,则发送功能列表
await session.send(
'目前支持的功能有:\n' + '\n'.join(p.name for p in plugins))
return
# 如果发了参数则发送相应命令的使用帮助
for p in plugins:
if p.name.lower() == arg:
await session.send(p.usage)
```
#### File: plugins/jrrp/data_source.py
```python
import json
import time
import os
import random
import httpx
# 是否使用溯洄的API以达到与Dice!同步的目的
IS_ONLINE=True
def get_jrrp_local(qq_id):
day=str(time.localtime(time.time())[2])
try:
jrrp_data=open(os.path.join(os.path.dirname(__file__),'jrrp_data.json'),'r',encoding='utf-8')
jrrp_data_json=json.loads(jrrp_data.read())
jrrp_data.close()
except:
jrrp_data_json={}
if qq_id in jrrp_data_json.keys() :
if day!=jrrp_data_json[qq_id]['day']:
jrrp=str(random.randint(1,100))
jrrp_data_json[qq_id]={'day':day,'jrrp':jrrp}
else:
jrrp=str(random.randint(1,100))
jrrp_data_json[qq_id]={'day':day,'jrrp':jrrp}
jrrp_data=open(os.path.join(os.path.dirname(__file__),'jrrp_data.json'),'w',encoding='utf-8')
jrrp_data.write(json.dumps(jrrp_data_json))
jrrp_data.close()
return '你今天的人品值是:'+jrrp_data_json[qq_id]['jrrp']
def get_jrrp_online(bot_qq_id,qq_id):
url='http://api.kokona.tech:5555/jrrp'
data={'User-Agent':'NoDice','QQ':bot_qq_id,'v':'20190114','QueryQQ':qq_id}
res=httpx.post(url=url,data=data)
return '你今天的人品值是:'+res.text
def get_jrrp(bot_qq_id,qq_id):
if IS_ONLINE:
return get_jrrp_online(bot_qq_id,qq_id)
else:
return get_jrrp_local(qq_id)
if __name__=='__main__':
print(get_jrrp('1234567890','1234567890'))
```
#### File: plugins/weather/data_source.py
```python
import requests
from .config import *
from aiocache import cached
@cached(ttl=60)
async def get_weather_short(city: str) -> str:
weather_json=get_weather_json(city)
weather=weather_json["result"]["weather"]
temperature=weather_json["result"]['temperature_curr']
return f'{city}当前天气{weather},温度{temperature}'
@cached(ttl=60)
async def get_weather_desc(city: str) -> str:
weather_json=get_weather_json(city)
weather=weather_json["result"]["weather"]
temperature=weather_json["result"]['temperature_curr']
humidity=weather_json["result"]['humidity']
wind=weather_json["result"]['wind']
return f'{city}当前天气{weather},温度{temperature},空气湿度{humidity},{wind}'
def get_weather_json(city: str):
url = 'http://api.k780.com'
params = {
'app' : 'weather.today',
'weaid' : city,
'appkey' : WEATHER_KEY,
'sign' : WEATHER_SIGN,
'format' : 'json',
}
json=requests.get(url,params).json()
if json['success']=="1":
return json
``` |
{
"source": "Jigsaw-Code/censoredplanet-analysis",
"score": 2
} |
#### File: censoredplanet-analysis/pipeline/manual_e2e_test.py
```python
import datetime
import os
import pwd
from typing import List, Any
import unittest
import warnings
from apache_beam.options.pipeline_options import PipelineOptions
from google.cloud import bigquery as cloud_bigquery
from google.cloud.exceptions import NotFound
import firehook_resources
from pipeline import run_beam_tables
from pipeline.metadata import ip_metadata
# The test table is written into the <project>:<username> dataset
username = pwd.getpwuid(os.getuid()).pw_name
BEAM_TEST_TABLE = f'{username}.manual_test'
BQ_TEST_TABLE = f'{firehook_resources.PROJECT_NAME}.{BEAM_TEST_TABLE}'
JOB_NAME = 'manual_test_job'
# These methods are used to monkey patch the data_to_load method in beam_tables
# in order to return our test data.
#
# These files represent different scan types (echo/discard/http/https)
# The actual pipeline doesn't write different scan types to the same table
# But since the table schemas are all the same we do it here to test all the
# different types of fields.
#
# These files contain real sample data, usually 4 measurements each, the first 2
# are measurements that succeeded, the last two are measurements that failed.
def local_data_to_load_http_and_https(*_: List[Any]) -> List[str]:
return [
'pipeline/e2e_test_data/http_results.json',
'pipeline/e2e_test_data/https_results.json'
]
def local_data_to_load_discard_and_echo(*_: List[Any]) -> List[str]:
return [
'pipeline/e2e_test_data/discard_results.json',
'pipeline/e2e_test_data/echo_results.json'
]
def get_local_pipeline_options(*_: List[Any]) -> PipelineOptions:
# This method is used to monkey patch the get_pipeline_options method in
# beam_tables in order to run a local pipeline.
return PipelineOptions(
runner='DirectRunner',
job_name=JOB_NAME,
project=firehook_resources.PROJECT_NAME,
temp_location=firehook_resources.BEAM_TEMP_LOCATION)
def run_local_pipeline(incremental: bool = False) -> None:
"""Run a local pipeline.
Reads local files but writes to bigquery.
Args:
incremental: bool, whether to run a full or incremental local pipeline.
"""
# pylint: disable=protected-access
test_runner = run_beam_tables.get_firehook_beam_pipeline_runner()
# Monkey patch the get_pipeline_options method to run a local pipeline
test_runner._get_pipeline_options = get_local_pipeline_options # type: ignore
# Monkey patch the data_to_load method to load only local data
if incremental:
test_runner._data_to_load = local_data_to_load_http_and_https # type: ignore
else:
test_runner._data_to_load = local_data_to_load_discard_and_echo # type: ignore
test_runner.run_beam_pipeline('test', incremental, JOB_NAME, BEAM_TEST_TABLE,
None, None)
# pylint: enable=protected-access
def clean_up_bq_table(client: cloud_bigquery.Client, table_name: str) -> None:
try:
client.get_table(table_name)
client.delete_table(table_name)
except NotFound:
pass
def get_bq_rows(client: cloud_bigquery.Client, table_name: str) -> List:
return list(client.query(f'SELECT * FROM {table_name}').result())
class PipelineManualE2eTest(unittest.TestCase):
"""Manual tests that require access to cloud project resources."""
def test_pipeline_e2e(self) -> None:
"""Test the full pipeline by running it twice locally on a few files."""
# Suppress some unittest socket warnings in beam code we don't control
warnings.simplefilter('ignore', ResourceWarning)
client = cloud_bigquery.Client()
try:
run_local_pipeline(incremental=False)
written_rows = get_bq_rows(client, BQ_TEST_TABLE)
self.assertEqual(len(written_rows), 28)
run_local_pipeline(incremental=True)
written_rows = get_bq_rows(client, BQ_TEST_TABLE)
self.assertEqual(len(written_rows), 53)
# Domain appear different numbers of times in the test table depending on
# how their measurement succeeded/failed.
expected_single_domains = [
'boingboing.net', 'box.com', 'google.com.ua', 'mos.ru', 'scribd.com',
'uploaded.to', 'www.blubster.com', 'www.orthodoxconvert.info'
]
expected_triple_domains = ['www.arabhra.org']
expected_sextuple_domains = [
'discover.com', 'peacefire.org', 'secondlife.com', 'www.89.com',
'www.casinotropez.com', 'www.epa.gov', 'www.sex.com'
]
all_expected_domains = (
expected_single_domains + expected_triple_domains * 3 +
expected_sextuple_domains * 6)
written_domains = [row[0] for row in written_rows]
self.assertListEqual(
sorted(written_domains), sorted(all_expected_domains))
finally:
clean_up_bq_table(client, BQ_TEST_TABLE)
def test_ipmetadata_init(self) -> None:
# This E2E test requires the user to have access to the
# gs://censoredplanet_geolocation bucket.
ip_metadata_db = ip_metadata.get_firehook_ip_metadata_db(
datetime.date(2018, 7, 27))
metadata = ip_metadata_db.lookup('1.1.1.1')
self.assertEqual(metadata, ('1.1.1.0/24', 13335, 'CLOUDFLARENET',
'Cloudflare, Inc.', 'Content', 'US'))
# This test is not run by default in unittest because it takes about a minute
# to run, plus it reads from and writes to bigquery.
#
# To run it manually use the command
# python3 -m unittest pipeline.manual_e2e_test.PipelineManualE2eTest
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jigsaw-labs/edx.oauth",
"score": 3
} |
#### File: edx.oauth/jigsawlabs_backends/auth0.py
```python
from urllib2 import urlopen
from jose import jwt
from social_core.backends.oauth import BaseOAuth2
class Auth0(BaseOAuth2):
"""Auth0 OAuth authentication backend"""
name = 'auth0'
SCOPE_SEPARATOR = ' '
ACCESS_TOKEN_METHOD = 'POST'
EXTRA_DATA = [
('picture', 'picture')
]
BASE_URL = 'https://jigsawlabs.auth0.com'
AUTHORIZATION_URL = BASE_URL + '/oauth/authorize'
ACCESS_TOKEN_URL = BASE_URL + '/oauth/token'
USER_QUERY = BASE_URL + '/api/user?'
SOCIAL_AUTH_TRAILING_SLASH = False # Remove trailing slash from routes
SOCIAL_AUTH_AUTH0_DOMAIN = 'jigsawlabs.auth0.com'
#SOCIAL_AUTH_AUTH0_KEY = '<KEY>'
#SOCIAL_AUTH_AUTH0_SECRET = '<KEY>'
SOCIAL_AUTH_AUTH0_SCOPE = [
'openid',
'profile'
]
def authorization_url(self):
return self.BASE_URL + '/authorize'
def access_token_url(self):
return self.BASE_URL + '/oauth/token'
def get_user_id(self, details, response):
"""Return current user id."""
return details['user_id']
def get_user_details(self, response):
# Obtain JWT and the keys to validate the signature
id_token = response.get('id_token')
jwks = urlopen(self.BASE_URL + '/.well-known/jwks.json')
issuer = self.BASE_URL + '/'
audience = self.setting('KEY') # CLIENT_ID
payload = jwt.decode(id_token,
jwks.read(),
algorithms=['RS256'],
audience=audience,
issuer=issuer)
fullname, first_name, last_name = self.get_user_names(payload['name'])
return {'username': payload['nickname'],
'email': payload['email'],
'email_verified': payload.get('email_verified', False),
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'picture': payload['picture'],
'user_id': payload['sub']}
``` |
{
"source": "jigschristian/truecaller",
"score": 2
} |
#### File: truecaller/truecaller_api/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from phonenumber_field.modelfields import PhoneNumberField
class Contact(models.Model):
name = models.CharField(max_length=256, blank=False)
email = models.EmailField(max_length=70, null=True, blank=True, unique=True)
phone = PhoneNumberField(null=False, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name + " - " + str(self.phone)
class OtherContact(models.Model):
contact = models.ForeignKey(Contact, on_delete=models.CASCADE)
phone = PhoneNumberField(null=False, unique=True)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.contact.name + " - " + str(self.phone)
``` |
{
"source": "Jigsy1/UUID2",
"score": 4
} |
#### File: Jigsy1/UUID2/UUID2.py
```python
import random
import string
import tkinter
from tkinter import *
uuid2 = Tk()
uuid2.title("UUID2")
uuid2.resizable(height=False, width=False)
def makeID():
    # Build 32 random characters and group them in the 8-4-4-4-12 UUID layout.
    raw = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(32))
    out = f"{raw[0:8]}-{raw[8:12]}-{raw[12:16]}-{raw[16:20]}-{raw[20:]}"
if braces.get() == 1:
out = "{" + out + "}"
print(out)
uuid2.clipboard_clear()
uuid2.clipboard_append(out)
uuid2.update()
braces = IntVar(value=1)
braceBox = Checkbutton(uuid2, text="{Use Braces}", variable=braces, onvalue=1, offvalue=0)
genButton = Button(uuid2, text="Generate", command=makeID)
braceBox.pack(side = RIGHT)
genButton.pack(side = LEFT)
uuid2.mainloop()
# EOF
``` |
{
"source": "Jigyanshu17/Python-Ka-Saara-Gyaan",
"score": 4
} |
#### File: Python-Ka-Saara-Gyaan/Chapter 8/Functions.py
```python
def function1(a,b):
"""This is a function which will calculate average of two numbers.""" #docstrings
average = (a+b)/2
#print(average)
return average # now it will return value
#function1(5,7)
#v = function1(5,7) # will return none
#print(v)
print(function1.__doc__)
```
#### File: Python-Ka-Saara-Gyaan/Chapter 8/Recursions.py
```python
def Factorial_Recursive(n):
"""
:param n: Integer
:return: n*n-1*n-2*n-3....1
"""
if n == 1:
return 1
else:
return n * Factorial_Recursive(n-1)
number = int(input("Enter the Number: "))
print("Factorial using Recursive Method is", Factorial_Recursive(number))
"""
LOGIC
5 * Factorial_Recursive(4)
5 * 4 * Factorial_Recursive(3)
5 * 4 * 3 * Factorial_Recursive(2)
5 * 4 * 3 * 2 * Factorial_Recursive(1)
5 * 4 * 3 * 2 * 1 = 120....
"""
```
#### File: Python-Ka-Saara-Gyaan/Chapter 9/block.py
```python
def greet(name):
gr = "Have a good day\t" + name
print(gr)
greet("Jiggu")
```
#### File: Python-Ka-Saara-Gyaan/Misc/Args & Kwargs.py
```python
def funargs(normal,*args, **kwargs):
print(normal)
for item in args:
print(item)
print("\nNow I would like to introduce some of our heroes")
for key,value in kwargs.items():
print(f"{key} is a {value}")
# As a tuple
# function_name_print("Jiggu","g","f","dd")
list = ["Jiggu","g","f","dd"]
normal = "Yhis is normal"
kw = {"Rohan":"Monitor", "Jiggu":"Sports coach","Him":"Programmer"}
funargs(normal,*list,**kw)
```
#### File: Python-Ka-Saara-Gyaan/Misc/Lambda Function.py
```python
a = [[1,14],[5,6],[7,8]]
a.sort(key = lambda x:x[1])
print(a)
```
#### File: Python-Ka-Saara-Gyaan/Project/Library Management System.py
```python
class Library:
def __init__(self, list, name):
self.booksList = list
self.name = name
self.lendDict = {}
def displayBooks(self):
print(f"We have following Books in library: {self.name}")
for book in self.booksList:
print(book)
def lendBook(self, user, book):
if book not in self.lendDict.keys():
self.lendDict.update({book:user})
print("Lender-Book database has been updated. You can take the book now")
else:
print(f"Book is already being used by {self.lendDict[book]}")
def addBook(self, book):
self.booksList.append(book)
print("Book has been added to the book list.")
def returnBook(self, book):
        self.lendDict.pop(book)
if __name__ == '__main__':
jiggu = Library(['Python', 'Rich Dad Poor Dad', 'Think and Grow rich'], "Jigyanshu")
while(True):
print(f"Welcome to the {jiggu.name} library. Enter the choice to continue.")
print("1. Display Books")
print("2. Lend a Book")
print("3. Add a Book")
print("4. Return a Book")
user_choice = input()
if user_choice not in ['1', '2', '3', '4']:
print("Please enter a valid option")
continue
else:
user_choice = int(user_choice)
if user_choice == 1:
jiggu.displayBooks()
elif user_choice == 2:
book = input("Enter the name of the book you want to lend-->>")
name = input("Enter your Name -- ")
            jiggu.lendBook(name, book)
elif user_choice == 3:
book = input("Enter the name of the book you want to Add-->>")
jiggu.addBook(book)
elif user_choice == 4:
book = input("Enter the name of the book you want to Return-->>")
jiggu.returnBook(book)
else:
print("Not a valid Option")
print("Press q to quit and c to continue")
user_choice2 = ""
while(user_choice2!="c" and user_choice2 != "q"):
user_choice2 = input()
if user_choice2 == "q":
exit()
if user_choice2 == "c":
continue
``` |
{
"source": "jigyasudhingra/E-commerce-Store-Using-React-And-Django",
"score": 2
} |
#### File: ecommerce/api/views.py
```python
from django.http import JsonResponse
# Create your views here.
def home(request):
return JsonResponse({'info': 'Ecommerce Store Using React And Django', 'name': "<NAME>"})
``` |
{
"source": "Jigyasu/droidlet",
"score": 2
} |
#### File: agents/locobot/teleop.py
```python
import os
import subprocess
import time
import signal
import random
import logging
import faulthandler
import threading
import functools
from multiprocessing import set_start_method
from droidlet import dashboard
from droidlet.dashboard.o3dviz import o3dviz
import numpy as np
from scipy.spatial import distance
import open3d as o3d
import time
import math
if __name__ == "__main__":
# this line has to go before any imports that contain @sio.on functions
# or else, those @sio.on calls become no-ops
dashboard.start()
o3dviz.start()
from droidlet.interpreter.robot import (
dance,
default_behaviors,
LocoGetMemoryHandler,
PutMemoryHandler,
LocoInterpreter,
)
from droidlet.dialog.robot import LocoBotCapabilities
from droidlet.event import sio
faulthandler.register(signal.SIGUSR1)
random.seed(0)
log_formatter = logging.Formatter(
"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s"
)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().handlers.clear()
mover = None
@sio.on("sendCommandToAgent")
def get_command(sid, command):
command, value = command.split()
print(command)
print(value)
test_command(sid, [command], value=value)
@sio.on("logData")
def log_data(sid, seconds):
test_command(sid, ["LOG_DATA"], value=seconds)
@sio.on("stopRobot")
def stop_robot(sid):
test_command(sid, ["STOP_ROBOT"])
@sio.on("unstopRobot")
def unstop_robot(sid):
test_command(sid, ["UNSTOP_ROBOT"])
def test_command(sid, commands, data={"yaw": 0.1, "velocity": 0.1, "move": 0.3}, value=None):
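    """Dispatch a teleop command to the robot mover: relative moves and turns, pan/tilt, absolute moves, look-at, data logging and stop/unstop."""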
print(commands, data, value)
move_dist = float(data['move'])
yaw = float(data['yaw'])
velocity = float(data['velocity'])
global mover
if mover == None:
return
if value is not None:
move_dist = value
def sync():
time.sleep(10)
for i in range(50):
mover.get_rgb_depth()
movement = [0.0, 0.0, 0.0]
for command in commands:
if command == "MOVE_FORWARD":
movement[0] += float(move_dist)
print("action: FORWARD", movement)
mover.move_relative([movement], blocking=False)
elif command == "MOVE_BACKWARD":
movement[0] -= float(move_dist)
print("action: BACKWARD", movement)
mover.move_relative([movement], blocking=False)
elif command == "MOVE_LEFT":
movement[2] += yaw
print("action: LEFT", movement)
mover.move_relative([movement], blocking=False)
elif command == "MOVE_RIGHT":
movement[2] -= yaw
print("action: RIGHT", movement)
mover.move_relative([movement], blocking=False)
elif command == "PAN_LEFT":
mover.bot.set_pan(mover.get_pan() + yaw)
sync()
elif command == "PAN_RIGHT":
mover.bot.set_pan(mover.get_pan() - yaw)
sync()
elif command == "TILT_UP":
mover.bot.set_tilt(mover.get_tilt() + yaw)
print("action: TILT_UP", mover.get_tilt() + yaw)
sync()
elif command == "TILT_DOWN":
mover.bot.set_tilt(mover.get_tilt() - yaw)
sync()
elif command == "LOG_DATA":
mover.log_data_start(float(value)) # in seconds
elif command == "STOP_ROBOT":
mover.stop()
elif command == "UNSTOP_ROBOT":
mover.unstop()
elif command == "SET_PAN":
print("action: SET_PAN", float(value))
mover.bot.set_pan(float(value))
sync()
elif command == "SET_TILT":
print("action: SET_TILT", float(value))
mover.bot.set_tilt(float(value))
sync()
elif command == "MOVE_ABSOLUTE":
xyyaw_s = value.split(',')
xyyaw_f = [float(v) for v in xyyaw_s]
print("action: MOVE_ABSOLUTE", xyyaw_f)
mover.move_absolute(xyyaw_f, blocking=False)
sync()
elif command == "LOOK_AT":
xyz = value.split(',')
xyz = [float(p) for p in xyz]
print("action: LOOK_AT", xyz)
mover.look_at(xyz, turn_base=False)
elif command == "RESET":
mover.bot.set_tilt(0.)
mover.bot.set_pan(0.)
print(command, movement)
@sio.on("movement command")
def test_command_web(sid, commands, data, value=None):
test_command(sid, commands, data=data, value=value)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Pass in server device IP")
parser.add_argument(
"--ip",
help="Server device (robot) IP. Default is 0.0.0.0",
type=str,
default="0.0.0.0",
)
parser.add_argument(
"--backend",
help="Which backend to use: habitat (default), hellorobot",
type=str,
default='habitat',
)
args = parser.parse_args()
ip = args.ip
backend = args.backend
print("Connecting to robot at ip: ", ip)
if backend == 'habitat':
from droidlet.lowlevel.locobot.locobot_mover import LoCoBotMover
mover = LoCoBotMover(ip=ip, backend='habitat')
elif backend == 'hellorobot':
from droidlet.lowlevel.hello_robot.hello_robot_mover import HelloRobotMover
mover = HelloRobotMover(ip=ip)
print("Mover is ready to be operated")
log_settings = {
"image_resolution": 512, # pixels
"image_quality": 10, # from 10 to 100, 100 being best
}
all_points = None
all_colors = None
first = True
prev_stg = None
path_count = 0
start_time = time.time_ns()
fps_freq = 1 # displays the frame rate every 1 second
counter = 0
while True:
counter += 1
iter_time = time.time_ns() - start_time
if float(iter_time) / 1e9 > fps_freq :
print("FPS: ", round(counter / (float(iter_time) / 1e9), 1), " ", int(iter_time / 1e6 / counter), "ms")
counter = 0
start_time = time.time_ns()
base_state = mover.get_base_pos_in_canonical_coords()
sio.emit("image_settings", log_settings)
resolution = log_settings["image_resolution"]
quality = log_settings["image_quality"]
# this goes from 21ms to 120ms
rgb_depth = mover.get_rgb_depth()
# this takes about 1.5 to 2 fps
serialized_image = rgb_depth.to_struct(resolution, quality)
sio.emit("rgb", serialized_image["rgb"])
sio.emit("depth", {
"depthImg": serialized_image["depth_img"],
"depthMax": serialized_image["depth_max"],
"depthMin": serialized_image["depth_min"],
})
points, colors = rgb_depth.ptcloud.reshape(-1, 3), rgb_depth.rgb.reshape(-1, 3)
colors = colors / 255.
if all_points is None:
all_points = points
all_colors = colors
else:
all_points = np.concatenate((all_points, points), axis=0)
all_colors = np.concatenate((all_colors, colors), axis=0)
opcd = o3d.geometry.PointCloud()
opcd.points = o3d.utility.Vector3dVector(all_points)
opcd.colors = o3d.utility.Vector3dVector(all_colors)
opcd = opcd.voxel_down_sample(0.05)
# # remove the rooftop / ceiling points in the point-cloud to make it easier to see the robot in the visualization
# crop_bounds = o3d.utility.Vector3dVector([
# [-1000., -20., -1000.],
# [1000., 20., 1000.0],
# ])
# opcd = opcd.crop(
# o3d.geometry.AxisAlignedBoundingBox.create_from_points(
# crop_bounds,
# )
# )
all_points = np.asarray(opcd.points)
all_colors = np.asarray(opcd.colors)
if first:
cmd = 'add'
first = False
else:
cmd = 'replace'
o3dviz.put('pointcloud', cmd, opcd)
# Plot the robot
x, y, yaw = base_state.tolist()
robot_orientation = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=.05,
cone_radius=.075,
cylinder_height = .50,
cone_height = .4,
resolution=20)
robot_orientation.compute_vertex_normals()
robot_orientation.paint_uniform_color([1.0, 0.5, 0.1])
robot_orientation.translate([y, -x, 0.], relative=False)
        # make the arrow representing the robot's heading parallel to the floor
robot_orientation.rotate(o3d.geometry.get_rotation_matrix_from_axis_angle([0, math.pi/2, 0]))
        # rotate the arrow to match the robot's yaw
if yaw != 0:
robot_orientation.rotate(o3d.geometry.get_rotation_matrix_from_axis_angle([0, 0, yaw]))
o3dviz.put('bot_orientation', cmd, robot_orientation)
robot_base = o3d.geometry.TriangleMesh.create_cylinder(radius=.1,
height=1,)
robot_base.translate([y, -x, 0.1], relative=False)
robot_base.compute_vertex_normals()
robot_base.paint_uniform_color([1.0, 1.0, 0.1])
o3dviz.put('bot_base', cmd, robot_base)
# red = x, green = y, blue = z
axis = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=np.array([0., 0., 0.]))
axis.compute_vertex_normals()
o3dviz.put('axis', cmd, axis)
# start the SLAM
if backend == 'habitat':
mover.explore((19,19,0))
# get the SLAM goals
goal_loc, stg = None, None # mover.bot.get_slam_goal()
# plot the final goal
if goal_loc is not None:
goal_x, goal_y, goal_z = goal_loc
cone = o3d.geometry.TriangleMesh.create_cylinder(radius=.2,
height=3.,)
cone.translate([goal_x, goal_y, 0.4], relative=False)
cone.compute_vertex_normals()
cone.paint_uniform_color([0.0, 1.0, 1.0])
o3dviz.put('goal_cone', cmd, cone)
# plot the short term goal in yellow and the path in green
if stg is not None:
stg_x, stg_y = stg
cone = o3d.geometry.TriangleMesh.create_cylinder(radius=.2,
height=3.,)
cone.translate([stg_x, stg_y, 1.4], relative=False)
cone.compute_vertex_normals()
cone.paint_uniform_color([1.0, 1.0, 0.0])
o3dviz.put('stg', cmd, cone)
if prev_stg is None:
prev_stg = [y, -x]
cur_stg = [stg_x, stg_y]
arrow_length = distance.euclidean(cur_stg, prev_stg)
if arrow_length > 0.0001:
path = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=.03,
cone_radius=.04,
cylinder_height = arrow_length / 2,
cone_height = arrow_length / 2,)
path.compute_vertex_normals()
path.paint_uniform_color([0.0, 1.0, 0.0])
path.translate([prev_stg[0], prev_stg[1], 0.2], relative=False)
path.rotate(o3d.geometry.get_rotation_matrix_from_axis_angle([0, math.pi/2, 0]))
path.rotate(o3d.geometry.get_rotation_matrix_from_axis_angle([0, 0, yaw]))
o3dviz.put('short_term_goal_path_{}'.format(path_count), 'add', path)
path_count = path_count + 1
prev_stg = cur_stg
# # get the obstacle map and plot it
# obstacles = mover.bot.get_map()
# obstacles = np.asarray(obstacles)
# obstacles = np.concatenate((-obstacles[:, [1]], -obstacles[:, [0]], np.zeros((obstacles.shape[0], 1))), axis=1)
# obspcd = o3d.geometry.PointCloud()
# obspcd.points = o3d.utility.Vector3dVector(obstacles)
# obspcd.paint_uniform_color([1.0, 0., 0.])
# obsvox = o3d.geometry.VoxelGrid.create_from_point_cloud(obspcd, 0.03)
# o3dviz.put('obstacles', cmd, obsvox)
time.sleep(0.001)
```
#### File: droidlet/dashboard/o3dviz.py
```python
import os
import time
os.environ["WEBRTC_IP"] = "0.0.0.0"
os.environ["WEBRTC_PORT"] = "8889"
import open3d as o3d
o3d.visualization.webrtc_server.enable_webrtc()
from open3d.visualization import O3DVisualizer, gui
import threading
import queue
class O3dViz(threading.Thread):
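    """Background thread that owns the Open3D visualizer window; other threads queue (name, command, geometry) tuples via put() and each is applied on a GUI tick."""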
def __init__(self, *args, **kwargs):
self.q = queue.Queue()
super().__init__(*args, **kwargs)
def put(self, name, command, obj):
# pass
self.q.put([name, command, obj])
def run(self):
app = gui.Application.instance
app.initialize()
w = O3DVisualizer("o3dviz", 1024, 768)
w.set_background((0.0, 0.0, 0.0, 1.0), None)
app.add_window(w)
reset_camera = False
while True:
app.run_one_tick()
time.sleep(0.001)
try:
name, command, geometry = self.q.get_nowait()
try:
if command == 'remove':
w.remove_geometry(name)
elif command == 'replace':
w.remove_geometry(name)
w.add_geometry(name, geometry)
elif command == 'add':
w.add_geometry(name, geometry)
except:
print("failed to add geometry to scene")
if not reset_camera:
# Look at A from camera placed at B with Y axis
# pointing at C
# useful for pyrobot co-ordinates
w.scene.camera.look_at([1, 0, 0],
[-5, 0, 1],
[0, 0, 1])
# useful for initial camera co-ordinates
# w.scene.camera.look_at([0, 0, 1],
# [0, 0, -1],
# [0, -1, 0])
reset_camera = True
w.post_redraw()
except queue.Empty:
pass
o3dviz = O3dViz()
def start():
o3dviz.start()
```
#### File: interpreter/robot/tasks.py
```python
import time
import math
import logging
import numpy as np
import os
import math
from droidlet.memory.robot.loco_memory_nodes import DetectedObjectNode
from droidlet.task.task import Task, BaseMovementTask
from droidlet.memory.memory_nodes import TaskNode
from droidlet.interpreter.robot.objects import DanceMovement
from droidlet.lowlevel.robot_mover_utils import (
get_move_target_for_point,
ARM_HEIGHT,
get_camera_angles,
TrajectoryDataSaver,
visualize_examine,
get_step_target_for_straightline_move,
ExaminedMap,
CAMERA_HEIGHT,
get_circular_path,
)
# FIXME store dances, etc.
class Dance(Task):
def __init__(self, agent, task_data, featurizer=None):
super().__init__(agent)
# movement should be a Movement object from dance.py
self.movement = DanceMovement(self.agent, None)
self.movement_type = task_data.get("movement_type", None)
TaskNode(self.agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
self.interrupted = False
if self.movement_type == "wave":
self.movement.wave()
elif not self.movement: # default move
mv = Move(self.agent, {"target": [-1000, -1000, -1000], "approx": 2})
self.add_child_task(mv)
self.finished = True
def torad(deg):
    # return None for a missing angle, but still convert a legitimate 0 degrees
    if deg is not None:
        return deg * math.pi / 180
#### TODO, FIXME!:
#### merge Look, Point, Turn into dancemove; on mc side too
class Look(Task):
def __init__(self, agent, task_data):
super().__init__(agent)
self.task_data = task_data
assert(
task_data.get("target") or
task_data.get("pitch") or
task_data.get("yaw") or
task_data.get("relative_yaw") or
task_data.get("relative_pitch")
)
self.command_sent = False
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
self.finished = False
self.interrupted = False
if not self.command_sent:
if self.task_data.get("target"):
status = self.agent.mover.look_at(self.task_data["target"])
if self.task_data.get("pitch") or self.task_data.get("yaw"):
status = self.agent.mover.set_look(
torad(self.task_data.get("yaw")),
torad(self.task_data.get("pitch"))
)
if self.task_data.get("relative_pitch") or self.task_data.get("relative_yaw"):
status = self.agent.mover.relative_pan_tilt(
torad(self.task_data.get("relative_yaw")),
torad(self.task_data.get("relative_pitch"))
)
self.command_sent = True
if status == "finished":
self.finished = True
else:
self.finished = self.agent.mover.bot_step()
    def __repr__(self):
        if self.task_data.get("target"):
            target = self.task_data.get("target")
            return "<Look at {} {} {}>".format(target[0], target[1], target[2])
        else:
            return "<Look: {}>".format(self.task_data)
class Point(Task):
def __init__(self, agent, task_data):
super().__init__(agent)
self.target = np.asarray(task_data["target"])
print(f'type {type(self.target), self.target}')
self.steps = ["not_started"] * 2
TaskNode(agent.memory, self.memid).update_task(task=self)
def get_pt_from_region(self, region):
assert (
len(region) == 6
), "Region list has less than 6 elements (minx, miny, minz, maxx, maxy, maxz)"
return region[:3] # just return the min xyz for now
@Task.step_wrapper
def step(self):
self.interrupted = False
print(f'target {self.target}')
pt = self.get_pt_from_region(self.target.tolist())
logging.info(f"Calling bot to Point at {pt}")
logging.info(f"base pos {self.agent.mover.get_base_pos_in_canonical_coords()}")
# Step 1 - Move close to the object.
if self.steps[0] == "not_started":
base_pos = self.agent.mover.get_base_pos_in_canonical_coords()
target = get_move_target_for_point(base_pos, pt)
logging.info(f"Move Target for point {target}")
self.add_child_task(Move(self.agent, {"target": target}))
self.steps[0] = "finished"
return
# Step 2 - Turn so that the object is in FOV
if self.steps[0] == "finished" and self.steps[1] == "not_started":
base_pos = self.agent.mover.get_base_pos_in_canonical_coords()
yaw_rad, _ = get_camera_angles([base_pos[0], ARM_HEIGHT, base_pos[1]], pt)
self.add_child_task(Turn(self.agent, {"yaw": yaw_rad}))
self.steps[1] = "finished"
return
# Step 3 - Point at the object
if self.steps[0] == "finished" and self.steps[1] == "finished":
status = self.agent.mover.point_at(pt)
if status == "finished":
self.finished = True
def __repr__(self):
return "<Point at {}>".format(self.target)
class Move(BaseMovementTask):
def __init__(self, agent, task_data, featurizer=None):
super().__init__(agent, task_data)
self.target = np.array(task_data["target"])
self.is_relative = task_data.get("is_relative", 0)
self.path = None
self.command_sent = False
TaskNode(agent.memory, self.memid).update_task(task=self)
def target_to_memory(self, target):
return [target[0], 0, target[1]]
@Task.step_wrapper
def step(self):
self.interrupted = False
self.finished = False
if not self.command_sent:
logging.info("calling move with : %r" % (self.target.tolist()))
self.command_sent = True
if self.is_relative:
self.agent.mover.move_relative([self.target.tolist()])
else:
self.agent.mover.move_absolute([self.target.tolist()])
else:
self.finished = self.agent.mover.bot_step()
def __repr__(self):
return "<Move {}>".format(self.target)
class Turn(Task):
def __init__(self, agent, task_data):
super().__init__(agent)
self.yaw = task_data.get("yaw") or task_data.get("relative_yaw")
self.command_sent = False
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
self.interrupted = False
self.finished = False
if not self.command_sent:
self.command_sent = True
self.agent.mover.turn(self.yaw)
else:
self.finished = self.agent.mover.bot_step()
def __repr__(self):
return "<Turn {} degrees>".format(self.yaw)
# TODO handle case where agent already has item in inventory (pure give)
class Get(Task):
def __init__(self, agent, task_data):
super().__init__(agent)
# get target should be a ReferenceObjectNode memid
self.get_target = task_data["get_target"]
self.give_target = task_data["give_target"]
# steps take values "not_started", "started", "complete"
if not self.give_target:
# TODO all movements simultaneous- change look while driving
# approach_pickup, look_at_object, grab
self.steps = ["not_started"] * 3
else:
# approach_pickup, look_at_object, grab, approach_dropoff, give/drop
self.steps = ["not_started"] * 5
TaskNode(agent.memory, self.memid).update_task(task=self)
def get_mv_target(self, get_or_give="get", end_distance=0.35):
"""figure out the location where agent should move to in order to get or give object in global frame
all units are in metric unit
Args:
get_or_give (str, optional): whether to get or give object. Defaults to "get".
end_distance (float, optional): stand end_distance away from the goal in meter. Defaults to 0.35.
Returns:
[tuple]: (x,y,theta) location the agent should move to, in global co-ordinate system
"""
agent_pos = np.array(self.agent.mover.get_base_pos())[:2]
if get_or_give == "get":
target_memid = self.get_target
else:
target_memid = self.give_target
target_pos = self.agent.memory.get_mem_by_id(target_memid).get_pos()
target_pos = np.array((target_pos[0], target_pos[2]))
diff = target_pos - agent_pos
distance = np.linalg.norm(diff)
# FIXME add a check to make sure not already there
xz = agent_pos + (distance - end_distance) * diff / distance
# TODO: Check if yaw s right
target_yaw = np.arctan2(diff[1], diff[0])
received_yaw = False
while not received_yaw:
try:
target_yaw += self.agent.mover.get_base_pos()[2]
received_yaw = True
except:
time.sleep(0.1)
return (xz[0], xz[1], target_yaw)
@Task.step_wrapper
def step(self):
agent = self.agent
self.interrupted = False
self.finished = False
# move to object to be picked up
if self.steps[0] == "not_started":
# check if already holding target object for pure give, when object is grasped
# its added to memory with tag "_in_inventory"
if self.get_target in agent.memory.get_memids_by_tag("_in_inventory"):
self.steps[0] = "finished"
self.steps[1] = "finished"
self.steps[2] = "finished"
else:
target = self.get_mv_target(get_or_give="get")
self.add_child_task(Move(agent, {"target": target}))
# TODO a loop? otherwise check location/graspability instead of just assuming?
self.steps[0] = "finished"
return
# look at the object directly
if self.steps[0] == "finished" and self.steps[1] == "not_started":
target_pos = agent.memory.get_mem_by_id(self.get_target).get_pos()
self.add_child_task(Look(agent, {"target": target_pos}))
self.steps[1] = "finished"
return
# grab it
if self.steps[1] == "finished" and self.steps[2] == "not_started":
self.add_child_task(AutoGrasp(agent, {"target": self.get_target}))
self.steps[2] = "finished"
return
if len(self.steps) == 3:
self.finished = True
return
# go to the place where you are supposed to drop off the item
if self.steps[3] == "not_started":
            target = self.get_mv_target(get_or_give="give")
self.add_child_task(Move(agent, {"target": target}))
# TODO a loop? otherwise check location/graspability instead of just assuming?
self.steps[3] = "finished"
return
# drop it
if self.steps[3] == "finished":
self.add_child_task(Drop(agent, {"object": self.get_target}))
self.finished = True
return
def __repr__(self):
return "<get {}>".format(self.get_target)
class AutoGrasp(Task):
"""thin wrapper for Dhiraj' grasping routine."""
def __init__(self, agent, task_data):
super().__init__(agent)
# this is a ref object memid
self.target = task_data["target"]
self.command_sent = False
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
self.interrupted = False
self.finished = False
if not self.command_sent:
self.command_sent = True
self.agent.mover.grab_nearby_object()
else:
self.finished = self.agent.mover.bot_step()
# TODO check that the object in the gripper is actually the object we meant to pick up
# TODO deal with failure cases
# TODO tag this in the grip task, not here
if self.finished:
if self.agent.mover.is_object_in_gripper():
self.agent.memory.tag(self.target, "_in_inventory")
class Drop(Task):
"""drop whatever is in hand."""
def __init__(self, agent, task_data):
super().__init__(agent)
# currently unused, we can expand this soon?
self.object_to_drop = task_data.get("object", None)
self.command_sent = False
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
agent = self.agent
self.interrupted = False
self.finished = False
if not self.command_sent:
logging.info("Dropping the object in hand")
self.command_sent = True
agent.mover.drop()
else:
self.finished = agent.mover.bot_step() and not agent.mover.is_object_in_gripper()
            if self.finished:
                if self.object_to_drop is None:
                    # assumed there is only one object with tag "_in_inventory"
                    for mmid in agent.memory.get_memids_by_tag("_in_inventory"):
                        agent.memory.untag(mmid, "_in_inventory")
                else:
                    agent.memory.untag(self.object_to_drop, "_in_inventory")
class TrajectorySaverTask(Task):
def __init__(self, agent, task_data):
super().__init__(agent, task_data)
self.save_data = task_data.get('save_data', False)
self.data_path = task_data.get('data_path', 'default')
self.dbg_str = 'None'
if self.save_data:
self.data_saver = TrajectoryDataSaver(os.path.join(agent.opts.data_store_path, self.data_path))
self.data_savers = [self.data_saver]
parent_data_saver = task_data.get('parent_data_saver', None)
if parent_data_saver is not None:
self.data_savers.append(parent_data_saver)
TaskNode(agent.memory, self.memid).update_task(task=self)
def save_rgb_depth_seg(self):
rgb, depth, segm = self.agent.mover.get_rgb_depth_segm()
# store depth in mm
depth *= 1e3
depth[depth > np.power(2, 16) - 1] = np.power(2, 16) - 1
depth = depth.astype(np.uint16)
pos = self.agent.mover.get_base_pos()
for data_saver in self.data_savers:
data_saver.set_dbg_str(self.dbg_str)
data_saver.save(rgb, depth, segm, pos)
@Task.step_wrapper
def step(self):
if self.save_data:
self.save_rgb_depth_seg()
def __repr__(self):
return "<TrajectorySaverTask {}>".format(self.target)
class CuriousExplore(TrajectorySaverTask):
"""use slam to explore environemt, but also examine detections"""
def __init__(self, agent, task_data):
super().__init__(agent, task_data)
self.steps = ["not_started"] * 2
self.task_data = task_data
self.goal = task_data.get("goal", (19,19,0))
self.init_curious_logger()
self.agent = agent
self.objects_examined = 0
self.save_data = task_data.get('save_data')
print(f'CuriousExplore task_data {task_data}')
TaskNode(agent.memory, self.memid).update_task(task=self)
def init_curious_logger(self):
self.logger = logging.getLogger('curious')
self.logger.setLevel(logging.INFO)
fh = logging.FileHandler(f"curious_explore_{'_'.join([str(x) for x in self.goal])}.log", 'w')
fh.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(filename)s:%(lineno)s - %(funcName)s(): %(message)s')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.logger.addHandler(ch)
self.logger.info(f'CuriousExplore task_data {self.task_data}')
@Task.step_wrapper
def step(self):
super().step()
self.interrupted = False
self.finished = False
if self.steps[0] == "not_started":
self.logger.info(f'exploring goal {self.goal}')
self.agent.mover.explore(self.goal)
self.dbg_str = "Explore"
self.steps[0] = "finished"
return
# execute a examine maneuver
if self.steps[0] == "finished" and self.steps[1] == "not_started":
# Get a list of current detections
objects = DetectedObjectNode.get_all(self.agent.memory)
pos = self.agent.mover.get_base_pos_in_canonical_coords()
# pick all from unexamined, in-sight object
def pick_random_in_sight(objects, base_pos):
for x in objects:
if ExaminedMap.can_examine(x):
# check for line of sight and if within a certain distance
yaw_rad, _ = get_camera_angles([base_pos[0], CAMERA_HEIGHT, base_pos[1]], x['xyz'])
dist = np.linalg.norm(base_pos[:2]-[x['xyz'][0], x['xyz'][2]])
if abs(yaw_rad - base_pos[2]) <= math.pi/4 and dist <= 3:
self.logger.info(f"Exploring eid {x['eid']}, {x['label']} next, dist {dist}")
return x
return None
target = pick_random_in_sight(objects, pos)
if target is not None:
self.logger.info(f"CuriousExplore Target {target['eid'], target['label'], target['xyz']}, robot pos {pos}")
ExaminedMap.update(target)
self.dbg_str = f"Examine {str(target['eid']) + '_' + str(target['label'])} xyz {str(np.round(target['xyz'],3))}"
if os.getenv('HEURISTIC') == 'straightline':
examine_heuristic = ExamineDetectionStraightline
else:
examine_heuristic = ExamineDetectionCircle
self.add_child_task(examine_heuristic(
self.agent, {
"target": target,
"save_data": self.save_data,
"root_data_path": f"{self.task_data['data_path']}",
"data_path": f"{self.task_data['data_path']}/{str(self.objects_examined)}",
"dbg_str": self.dbg_str,
"parent_data_saver": self.data_savers[0],
"logger": self.logger,
}
)
)
self.objects_examined += 1
self.steps[1] = "finished"
return
else:
self.finished = self.agent.mover.nav.is_done_exploring().value
if not self.finished:
self.steps = ["not_started"] * 2
else:
self.logger.info(f"Exploration finished!")
def __repr__(self):
return "<CuriousExplore>"
class ExamineDetectionStraightline(TrajectorySaverTask):
"""Examine a detection"""
def __init__(self, agent, task_data):
super().__init__(agent, task_data)
self.task_data = task_data
self.target = task_data['target']
self.frontier_center = np.asarray(self.target['xyz'])
self.agent = agent
self.last_base_pos = None
self.robot_poses = []
self.dbg_str = task_data.get('dbg_str')
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
super().step()
self.interrupted = False
self.finished = False
logger = logging.getLogger('curious')
base_pos = self.agent.mover.get_base_pos_in_canonical_coords()
self.robot_poses.append(base_pos)
dist = np.linalg.norm(base_pos[:2]-np.asarray([self.frontier_center[0], self.frontier_center[2]]))
logger.info(f"Deciding examination, dist = {dist}")
d = 1
if self.last_base_pos is not None:
d = np.linalg.norm(base_pos[:2] - self.last_base_pos[:2])
# logger.info(f"Distance moved {d}")
if (base_pos != self.last_base_pos).any() and dist > 0.2 and d > 0:
tloc = get_step_target_for_straightline_move(base_pos, self.frontier_center)
logger.debug(f"get_step_target_for_straight_move \
\nx, z, yaw = {base_pos},\
\nxf, zf = {self.frontier_center[0], self.frontier_center[2]} \
\nx_move, z_move, yaw_move = {tloc}")
logging.info(f"Current Pos {base_pos}")
logging.info(f"Move Target for Examining {tloc}")
logging.info(f"Distance being moved {np.linalg.norm(base_pos[:2]-tloc[:2])}")
self.add_child_task(Move(self.agent, {"target": tloc}))
# visualize tloc, frontier_center, obstacle_map
if os.getenv('VISUALIZE_EXAMINE', 'False') == 'True':
visualize_examine(
self.agent,
self.robot_poses,
self.frontier_center,
self.target['label'],
self.agent.mover.get_obstacles_in_canonical_coords(),
self.task_data['root_data_path'],
)
self.last_base_pos = base_pos
return
else:
logger.info(f"Finished Examination")
self.finished = self.agent.mover.bot_step()
def __repr__(self):
return "<ExamineDetectionStraightline {}>".format(self.target['label'])
class ExamineDetectionCircle(TrajectorySaverTask):
"""Examine a detection"""
def __init__(self, agent, task_data):
super().__init__(agent, task_data)
self.task_data = task_data
self.target = task_data['target']
self.frontier_center = np.asarray(self.target['xyz'])
self.agent = agent
self.steps = 0
self.robot_poses = []
self.last_base_pos = None
self.dbg_str = task_data.get('dbg_str')
self.logger = task_data.get('logger')
base_pos = self.agent.mover.get_base_pos_in_canonical_coords()
self.pts = get_circular_path(self.frontier_center, base_pos, radius=0.7, num_points=40)
        self.logger.info(f'{len(self.pts)} pts on circle {self.pts}')
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
super().step()
self.interrupted = False
self.finished = False
base_pos = self.agent.mover.get_base_pos_in_canonical_coords()
if self.steps > 0: # without any steps, the robot isn't on the circle of inspection
self.robot_poses.append(base_pos)
d = 1
if self.last_base_pos is not None:
d = np.linalg.norm(base_pos[:2] - self.last_base_pos[:2])
self.logger.info(f"Distance moved {d}")
if (base_pos != self.last_base_pos).any() and self.steps < len(self.pts):
tloc = self.pts[self.steps]
self.steps += 1
self.logger.info(f'step {self.steps} moving to {tloc} Current Pos {base_pos}')
self.add_child_task(Move(self.agent, {"target": tloc}))
self.last_base_pos = base_pos
# visualize tloc, frontier_center, obstacle_map
if os.getenv('VISUALIZE_EXAMINE', 'False') == 'True':
visualize_examine(
self.agent,
self.robot_poses,
self.frontier_center,
self.target['label'],
self.agent.mover.get_obstacles_in_canonical_coords(),
self.task_data['root_data_path'],
self.pts,
)
return
else:
self.logger.info(f"Finished Examination")
self.finished = self.agent.mover.bot_step()
def __repr__(self):
return "<ExamineDetectionCircle {}>".format(self.target['label'])
class Explore(TrajectorySaverTask):
"""use slam to explore environemt"""
def __init__(self, agent, task_data):
super().__init__(agent, task_data)
self.command_sent = False
self.agent = agent
self.goal = task_data.get("goal")
TaskNode(agent.memory, self.memid).update_task(task=self)
@Task.step_wrapper
def step(self):
super().step()
self.interrupted = False
self.finished = False
if not self.finished:
self.agent.mover.explore(self.goal)
self.finished = self.agent.mover.nav.is_done_exploring().value
```
#### File: hitl/utils/hitl_utils.py
```python
import logging
import os
import re
import CloudFlare
import boto3
def generate_batch_id():
"""
Generate a unique id for each hitl run
"""
import datetime
dt = datetime.datetime.now()
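    # e.g. a run started at 2021-10-14 21:43:58 yields the batch id 20211014214358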
return int(dt.strftime("%Y%m%d%H%M%S"))
def deregister_dashboard_subdomain(batch_id):
"""
Deregister all subdomains of a given batch on craftassist.io
"""
if (
os.getenv("CLOUDFLARE_TOKEN")
and os.getenv("CLOUDFLARE_ZONE_ID")
and os.getenv("CLOUDFLARE_EMAIL")
):
logging.info("Deresigister subdomains on craftassist.io")
cf_token = os.getenv("CLOUDFLARE_TOKEN")
zone_id = os.getenv("CLOUDFLARE_ZONE_ID")
cf_email = os.getenv("CLOUDFLARE_EMAIL")
cf = CloudFlare.CloudFlare(email=cf_email, token=cf_token)
dns_records = cf.zones.dns_records.get(zone_id)
for record in dns_records:
print(f'{record["name"]} pattern : {batch_id}')
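            # e.g. with batch_id 20211014214358 this matches record names like
            # "dashboard-20211014214358-3.craftassist.io"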
if re.match(fr"dashboard-{batch_id}-\d+.craftassist.io", record["name"]):
print(f"matched cf record to be deleted: {record['name']}")
cf.zones.dns_records.delete(zone_id, record["id"])
logging.debug(f'Deleted cf dns record: {record["name"]}')
print(f'Deleted cf dns record: {record["name"]}')
def dedup_commands(command_list):
"""
Deduplicate a command list.
Now it only removes repeated commands.
"""
cmd_set = set()
deduped_cmd_list = []
for command in command_list:
if command.lower() not in cmd_set:
cmd_set.add(command.lower())
deduped_cmd_list.append(command)
return deduped_cmd_list
def examine_hit(hit_id):
"""
Examine all assignments of a given HIT
"""
access_key = os.getenv("MTURK_AWS_ACCESS_KEY_ID")
secret_key = os.getenv("MTURK_AWS_SECRET_ACCESS_KEY")
aws_region = os.getenv("MTURK_AWS_REGION", default="us-east-1")
dev_flag = None
if dev_flag:
MTURK_URL = "https://mturk-requester-sandbox.{}.amazonaws.com".format(aws_region)
else:
MTURK_URL = "https://mturk-requester.{}.amazonaws.com".format(aws_region)
mturk = boto3.client(
"mturk",
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
region_name=aws_region,
endpoint_url=MTURK_URL,
)
worker_results = mturk.list_assignments_for_hit(
HITId=hit_id, AssignmentStatuses=["Submitted"]
)
print(worker_results["NumResults"])
print(worker_results["Assignments"])
def delete_all_mturk_hits():
"""
Delete all HITs of a given account.
Please use it with caution.
"""
import os
import boto3
from datetime import datetime
access_key = os.getenv("MTURK_AWS_ACCESS_KEY_ID")
secret_key = os.getenv("MTURK_AWS_SECRET_ACCESS_KEY")
aws_region = os.getenv("MTURK_AWS_REGION", default="us-east-1")
MTURK_URL = "https://mturk-requester-sandbox.{}.amazonaws.com".format(aws_region)
mturk = boto3.client(
"mturk",
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
region_name=aws_region,
endpoint_url=MTURK_URL,
)
all_hits = mturk.list_hits()["HITs"]
hit_ids = [item["HITId"] for item in all_hits]
# This is slow but there's no better way to get the status of pending HITs
for hit_id in hit_ids:
# Get HIT status
status = mturk.get_hit(HITId=hit_id)["HIT"]["HITStatus"]
try:
response = mturk.update_expiration_for_hit(HITId=hit_id, ExpireAt=datetime(2015, 1, 1))
mturk.delete_hit(HITId=hit_id)
except:
pass
print(f"Hit {hit_id}, status: {status}")
if __name__ == "__main__":
# pass
for i in range(100):
deregister_dashboard_subdomain(20211014214358)
# examine_hit("34YWR3PJ2AD51SZWORZ4M41QBOG0XV")
```
#### File: fbrp/runtime/test_conda.py
```python
import os
import unittest
from fbrp.life_cycle import State
from fbrp.process import ProcDef
from fbrp.runtime.conda import CondaEnv, Launcher
from unittest import IsolatedAsyncioTestCase
from unittest import mock
from unittest.mock import call, patch, mock_open
class TestCondaEnv(unittest.TestCase):
def test_merge_and_fix_pip_1(self):
a = CondaEnv(
channels=["conda-forge", "robostack", "pytorch"],
dependencies=["ros-noetic-genpy"],
)
b = CondaEnv(
channels=["pytorch", "nvidia"],
dependencies=["pytorch", "numpy", "dataclasses"],
)
c = CondaEnv.merge(a, b)
assert len(c.channels) == 4
assert c.channels == sorted(["conda-forge", "robostack", "pytorch", "nvidia"])
assert len(c.dependencies) == 4
assert c.dependencies == sorted(["pytorch", "ros-noetic-genpy", "numpy", "dataclasses"])
assert "pip" not in c.dependencies
c.fix_pip()
assert "pip" not in c.dependencies
def test_merge_and_fix_pip_2(self):
a = CondaEnv(
channels=["conda-forge", "robostack", "pytorch"],
dependencies=["ros-noetic-genpy", {"pip": ["scipy", "cuda110", "pytorch"]}],
)
b = CondaEnv(
channels=["pytorch", "nvidia"],
dependencies=["pytorch", "numpy", "dataclasses"],
)
c = CondaEnv.merge(a, b)
assert len(c.channels) == 4
assert c.channels == sorted(["conda-forge", "robostack", "pytorch", "nvidia"])
assert len(c.dependencies) == 5
ref_deps = sorted(["pytorch", "ros-noetic-genpy", "numpy", "dataclasses"])
list_deps = []
for dep in c.dependencies:
if type(dep) == dict:
assert sorted(dep["pip"]) == sorted(["scipy", "cuda110", "pytorch"])
else:
assert dep in ref_deps
list_deps.append(dep)
assert "pip" not in c.dependencies
c.fix_pip()
assert "pip" in c.dependencies
class TestLauncher(IsolatedAsyncioTestCase):
@patch("builtins.open", new_callable=mock_open, read_data="env_var=data" + "\0")
@patch("argparse.Namespace")
async def test_activate_conda_env(self, mock_namespace, mock_file):
proc_def = ProcDef(
name="test_conda",
root=None,
rule_file=None,
runtime="BaseRuntime",
cfg={},
deps=[],
env={},
)
launcher = Launcher(
name="test_conda",
run_command=["python3", "alice.py"],
proc_def=proc_def,
args=mock_namespace,
)
os_env_patch = mock.patch.dict(os.environ, {"my_path": "path"})
os_env_patch.start()
conda_env = await launcher.activate_conda_env()
mock_file.assert_called_with(f"/tmp/fbrp_conda_test_conda.env")
assert len(conda_env) == 1
self.assertDictEqual(conda_env, {"env_var": "data"})
os_env_patch.stop()
@patch("fbrp.life_cycle.set_state")
@patch("fbrp.runtime.conda.Launcher.gather_cmd_outputs")
@patch("fbrp.runtime.conda.Launcher.run_cmd_in_env")
@patch("fbrp.runtime.conda.Launcher.activate_conda_env")
@patch("argparse.Namespace")
async def test_run(
self,
mock_namespace,
mock_activate_conda_env,
mock_run_cmd_in_env,
mock_gather_cmd_outputs,
mock_set_state,
):
proc_def = ProcDef(
name="test_conda",
root=None,
rule_file=None,
runtime="BaseRuntime",
cfg={},
deps=[],
env={},
)
launcher = Launcher(
name="test_conda",
run_command=["python3", "alice.py"],
proc_def=proc_def,
args=mock_namespace,
)
await launcher.run()
mock_activate_conda_env.assert_called_once()
mock_run_cmd_in_env.assert_called_once()
mock_gather_cmd_outputs.assert_called_once()
assert mock_set_state.call_count == 2
mock_set_state.assert_has_calls(
[call("test_conda", State.STARTING), call("test_conda", State.STARTED)]
)
@patch("fbrp.life_cycle.set_state")
@patch("fbrp.runtime.conda.Launcher.exit_cmd_in_env")
@patch("argparse.Namespace")
async def test_death_handler(
self, mock_namespace, mock_exit_cmd_in_env, mock_set_state
):
mock_exit_cmd_in_env.return_value = 0
proc_def = ProcDef(
name="test_conda",
root=None,
rule_file=None,
runtime="BaseRuntime",
cfg={},
deps=[],
env={},
)
launcher = Launcher(
name="test_conda",
run_command=["python3", "alice.py"],
proc_def=proc_def,
args=mock_namespace,
)
await launcher.death_handler()
mock_exit_cmd_in_env.assert_called_once()
mock_set_state.assert_called_once_with(
"test_conda", State.STOPPED, return_code=0
)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "Jih00Kim/inclass_p1",
"score": 4
} |
#### File: Jih00Kim/inclass_p1/adder.py
```python
def add(a, b):
    a = float(input('num1'))
    b = float(input('num2'))
    return a + b
``` |
{
"source": "Jihackstory/voice_activity_detection",
"score": 2
} |
#### File: Jihackstory/voice_activity_detection/train.py
```python
import torch
import numpy as np
import os
import torch.nn as nn
import torch.optim as optim
# from torchsummary import summary as summary_
from data_tools import prepare_input_img, check_length
import model_nn as model
def train_model(params):
path = params['path']
model_path = params['model_path']
batch_size = params['batch_size']
num_epochs = params['epochs']
train_val_ratio = params['train_val_ratio']
early_stopping = params['baseline_val_loss']
data = np.expand_dims(np.load(os.path.join(path, 'train_data.npy')), 1)
label = np.load(os.path.join(path, 'train_label.npy'))
train_loader, val_loader = prepare_input_img(data, label, train_val_ratio, batch_size)
num_total_batch, str_total_batch, str_epochs = check_length(train_loader, num_epochs)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = model.Resnet1D(params).to(device)
# check the structure of the model
# summary_(net, (1, 65, 16), batch_size=32)
train_losses, valid_losses = [], []
avg_train_losses, avg_valid_losses = [], []
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.Adam(net.parameters(), lr=params['learning_rate'])
print('\nstart the training\n')
for epoch in range(1, num_epochs+1):
# training step
net.train()
for batch, (data, target) in enumerate(train_loader, 1):
optimizer.zero_grad()
out = net(data.to(device))
loss = criterion(out, target.to(device))
loss.backward()
optimizer.step()
train_losses.append(loss.item())
print_batch_msg = (f'\r[batch : {batch:>{str_total_batch}}/{num_total_batch:>{str_total_batch}} ]')
print(print_batch_msg, end=' ')
# validation step
total, correct = 0, 0
net.eval()
for data, target in val_loader:
target = target.to(device)
out = net(data.to(device))
loss = criterion(out, target)
valid_losses.append(loss.item())
_, predicted = torch.max(out.data, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
train_loss = np.average(train_losses)
valid_loss = np.average(valid_losses)
avg_train_losses.append(train_loss)
avg_valid_losses.append(valid_loss)
print_loss_msg = (f'\r[{epoch:>{str_epochs}}/{num_epochs:>{str_epochs}} ]' +
f' train_loss: {train_loss:.5f} ' +
f'/ valid_loss: {valid_loss:.5f}' +
f'/ valid_acc: {100 * correct / total:.3f}')
print(print_loss_msg)
# Early Stopping
if valid_loss < early_stopping:
print('Early stopping!!!')
break
# save the weights and the model
save_fn = os.path.join(model_path, "vad_model.pt")
torch.save(net, save_fn)
print('\nsaving the model')
``` |
{
"source": "Jihadist/Zabbix-in-Telegram",
"score": 2
} |
#### File: Jihadist/Zabbix-in-Telegram/ZbxTgDaemon.py
```python
import sys
import os
import hashlib
import re
import time
from os.path import dirname
import zbxtg_settings
import zbxtg
from pyzabbix import ZabbixAPI, ZabbixAPIException
class zabbixApi():
def __init__(self, server, user, password):
self.api = ZabbixAPI(server)
self.user = user
self.password = password
def login(self):
self.api.login(self.user, self.password)
def triggers_active(self):
return self.api.trigger.get(output="extend", monitored=True, filter={"value": 1}, sortfield="priority", sortorder="DESC",
selectHosts="extend")
def print_message(string):
string = str(string) + "\n"
filename = sys.argv[0].split("/")[-1]
sys.stderr.write(filename + ": " + string)
def file_write(filename, text):
with open(filename, "w") as fd:
fd.write(str(text))
return True
def file_read(filename):
with open(filename, 'r') as fd:
text = fd.readlines()
return text
def main():
TelegramAPI = zbxtg.TelegramAPI
ZabbixWeb = zbxtg.ZabbixWeb
tmp_dir = zbxtg_settings.zbx_tg_tmp_dir
if not zbxtg_settings.zbx_tg_daemon_enabled:
print("You should enable daemon by adding 'zbx_tg_remote_control' in the configuration file")
sys.exit(1)
tmp_uids = tmp_dir + "/uids.txt"
tmp_ts = {
"message_id": tmp_dir + "/daemon_message_id.txt",
"update_offset": tmp_dir + "/update_offset.txt",
}
    for i, v in tmp_ts.items():
if not os.path.exists(v):
print_message("{0} doesn't exist, creating new one...".format(v))
file_write(v, "0")
print_message("{0} successfully created".format(v))
message_id_last = file_read(tmp_ts["message_id"])[0].strip()
if message_id_last:
message_id_last = int(message_id_last)
    update_id = file_read(tmp_ts["update_offset"])[0].strip()
tg = TelegramAPI(key=zbxtg_settings.tg_key)
if zbxtg_settings.proxy_to_tg:
proxy_to_tg = zbxtg_settings.proxy_to_tg
if not proxy_to_tg.find("http") and not proxy_to_tg.find("socks"):
proxy_to_tg = "https://" + proxy_to_tg
tg.proxies = {
"https": "{0}".format(zbxtg_settings.proxy_to_tg),
}
zbx = ZabbixWeb(server=zbxtg_settings.zbx_server, username=zbxtg_settings.zbx_api_user,
password=zbxtg_settings.zbx_api_pass)
if zbxtg_settings.proxy_to_zbx:
zbx.proxies = {"http": "http://{0}/".format(zbxtg_settings.proxy_to_zbx)}
try:
zbx_api_verify = zbxtg_settings.zbx_api_verify
zbx.verify = zbx_api_verify
except:
pass
zbxapi = zabbixApi(zbxtg_settings.zbx_server, zbxtg_settings.zbx_api_user, zbxtg_settings.zbx_api_pass)
zbxapi.login()
print(tg.get_me())
#hosts = zbxdb.db_query("SELECT hostid, host FROM hosts")
commands = [
"/triggers",
"/help",
# "/graph",
# "/history",
# "/screen"
]
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
md5sum = md5("ZbxTgDaemon.py")
    print(md5sum)
try:
while True:
time.sleep(1)
md5sum_new = md5("ZbxTgDaemon.py")
if md5sum != md5sum_new:
sys.exit(1)
tg.update_offset = update_id
updates = tg.get_updates()
if not updates["result"]:
continue
for m in updates["result"]:
if "message" not in m:
continue
update_id_last = m["update_id"]
tg.update_offset = update_id_last
if m["message"]["from"]["id"] not in zbxtg_settings.zbx_tg_daemon_enabled_ids:
                    print("Fuck this shit, I'm not going to answer to someone not from the whitelist")
                    file_write(tmp_ts["update_offset"], update_id_last)
                    continue
else:
if not "text" in m["message"]:
continue
text = m["message"]["text"]
to = m["message"]["from"]["id"]
reply_text = list()
if m["message"]["message_id"] > message_id_last:
if re.search(r"^/(start|help)", text):
reply_text.append("Hey, this is ZbxTgDaemon bot.")
reply_text.append("https://github.com/ableev/Zabbix-in-Telegram")
reply_text.append("If you need help, you can ask it in @ZbxTg group\n")
reply_text.append("Available commands:")
reply_text.append("\n".join(commands))
tg.disable_web_page_preview = True
if re.search(r"^/triggers", text):
triggers = zbxapi.triggers_active()
if triggers:
for t in triggers:
reply_text.append("Severity: {0}, Host: {1}, Trigger: {2}".format(
t["priority"], t["hosts"][0]["host"].encode('utf-8'), t["description"].encode('utf-8')
))
else:
reply_text.append("There are no triggers, have a nice day!")
if not reply_text:
reply_text = ["I don't know what to do about it"]
if tg.send_message(to, reply_text):
with open(tmp_ts["message_id"], "w") as message_id_file:
message_id_file.write(str(m["message"]["message_id"]))
message_id_last = m["message"]["message_id"]
tg.disable_web_page_preview = False
file_write(tmp_ts["update_offset"], update_id_last)
except KeyboardInterrupt:
print("Exiting...")
if __name__ == "__main__":
main()
``` |
{
"source": "jihaekor/knowru_client",
"score": 2
} |
#### File: knowru_client/knowru_client/knowru_client.py
```python
from __future__ import absolute_import, unicode_literals
import traceback
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
from timezone_logging.timezone_logging import get_timezone_logger
class KnowruClient(object):
def __init__(self, token, knowru_url='https://www.knowru.com', retry_total=3, backoff_factor=1, status_forcelist=(502, 503, 504), method_whitelist=('GET', 'POST')):
if knowru_url.endswith('/'):
knowru_url = knowru_url[:-1]
self.knowru_url = knowru_url
self._token = token
self.headers = {'Authorization': 'Token {}'.format(token), 'Content-Type': 'application/json', 'Accept': 'application/json'}
self._retry_total = retry_total
self._backoff_factor = backoff_factor
self._status_forcelist = status_forcelist
self._method_whitelist = method_whitelist
self._adapter = HTTPAdapter(max_retries=Retry(total=retry_total, backoff_factor=backoff_factor, status_forcelist=status_forcelist, method_whitelist=method_whitelist))
self.session = requests.Session()
self.session.mount(knowru_url, self._adapter)
def run_runnable(self, runnable_name, input_data, output_if_error=None):
try:
r = self.session.post('{}/api/runnable/{}/run.json/'.format(self.knowru_url, runnable_name), json={'input': input_data}, headers=self.headers)
except Exception as e:
            logger = get_timezone_logger('KnowruClient.run_runnable')
logger.error(traceback.format_exc())
if output_if_error is not None:
return output_if_error
else:
raise e
else:
return r.json()['output']
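# Hypothetical usage sketch (token and runnable name are placeholders, not from the original repo):
#   client = KnowruClient(token='YOUR_API_TOKEN')
#   result = client.run_runnable('my_runnable', {'x': 1}, output_if_error={'error': True})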
``` |
{
"source": "jihan1218/multi-agent-predator-prey",
"score": 2
} |
#### File: training/ppo_pkg/ppo.py
```python
import numpy as np
import tensorflow as tf
import gym
import time
import operator
from copy import deepcopy
from training.ppo_pkg import core
from training.ppo_pkg.specs import pi_specs, v_specs
from spinup.utils.logx import EpochLogger
from spinup.utils.mpi_tf import MpiAdamOptimizer, sync_all_params
from spinup.utils.mpi_tools import mpi_fork, mpi_avg, proc_id, mpi_statistics_scalar, num_procs
from ma_policy.util import listdict2dictnp
from mae_envs.viewer.policy_viewer import splitobs
from gym.spaces import Box, Discrete, Dict, MultiDiscrete, Tuple
from ma_policy.ma_policy import MAPolicy
class PPOBuffer:
"""
A buffer for storing trajectories experienced by a PPO agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
self.n_agents = 1
self.size = size # Buffer size
self.obs_buf = {} # Observations
self.act_buf = {} # Actions
self.adv_buf = [] # Advantage estimations
self.rew_buf = [] # Rewards
self.ret_buf = [] # Returns (Rewards-to-go)
self.val_buf = [] # Values (From the value network)
self.logp_buf = [] # Log probabilities of actions
self.gamma, self.lam = gamma, lam # Gamma and lambda are used for GAE advantage estimation
self.ptr, self.path_start_idx, self.max_size = 0, 0, size # Control variables for buffer management
def store(self, obs, act, rew, val, logp):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert self.ptr < self.max_size # buffer has to have room so you can store
if self.ptr == 0:
self.obs_buf = obs
self.act_buf = act
self.rew_buf = rew
self.val_buf = val
self.logp_buf = logp
self.n_agents = rew.size
else:
for k, v in obs.items():
self.obs_buf[k] = np.vstack((self.obs_buf[k], obs[k]))
for k, v in act.items():
self.act_buf[k] = np.vstack((self.act_buf[k], act[k]))
self.rew_buf = np.vstack((self.rew_buf, rew))
self.val_buf = np.vstack((self.val_buf, val))
self.logp_buf = np.vstack((self.logp_buf, logp))
self.ptr += 1
def finish_path(self, last_val=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx, self.ptr)
if self.n_agents > 1:
if last_val == 0:
last_val = [0, 0]
rews = np.vstack((self.rew_buf[path_slice], last_val))
vals = np.vstack((self.val_buf[path_slice], last_val))
else:
rews = np.append(self.rew_buf[path_slice], last_val)
vals = np.append(self.val_buf[path_slice], last_val)
# the next two lines implement GAE-Lambda advantage calculation
deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
self.adv_buf[path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)
# the next line computes rewards-to-go, to be targets for the value function
self.ret_buf[path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
self.path_start_idx = self.ptr
def get(self, aux_vars_only=False):
"""
Call this at the end of an epoch to get all of the data from
the buffer, with advantages appropriately normalized (shifted to have
mean zero and std one). Also, resets some pointers in the buffer.
"""
# buffer has to be full before you can get the data from it
assert self.ptr == self.max_size
self.ptr, self.path_start_idx = 0, 0
buf_shape = (self.size, self.n_agents)
adv_list = np.zeros(buf_shape, dtype=np.float32)
ret_list = np.zeros(buf_shape, dtype=np.float32)
for i in range(self.size):
for j in range(self.n_agents):
adv_list[i][j] = self.adv_buf[i][j]
ret_list[i][j] = self.ret_buf[i][j]
# the next lines implement the advantage normalization trick
adv_mean = np.zeros(self.n_agents, dtype=np.float32)
adv_std = np.zeros(self.n_agents, dtype=np.float32)
for i in range(self.n_agents):
mean, std = mpi_statistics_scalar(np.reshape(adv_list[:, i], (self.size, 1)))
adv_mean[i] = mean
adv_std[i] = std
self.adv_buf = (adv_list - adv_mean) / adv_std
self.ret_buf = ret_list
act_data = [v for k, v in self.act_buf.items()]
obs_data = [v for k, v in self.obs_buf.items()]
aux_shape = (self.size * self.n_agents, 1)
if aux_vars_only:
return [np.reshape(self.adv_buf, aux_shape), np.reshape(self.ret_buf, aux_shape),
np.reshape(self.logp_buf, aux_shape)]
else:
return [act_data[0], obs_data[0], np.reshape(self.adv_buf, aux_shape), np.reshape(self.ret_buf, aux_shape),
np.reshape(self.logp_buf, aux_shape)]
# ==================================================================================================== #
# ====================================== PPO TRAINING FUNCTION ======================================= #
# ==================================================================================================== #
def ppo(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=33,
steps_per_epoch=4000, epochs=50, gamma=0.998, clip_ratio=0.2, pi_lr=3e-4,
vf_lr=1e-3, train_pi_iters=60, train_v_iters=60, lam=0.95, max_ep_len=1000,
target_kl=0.01, logger_kwargs=dict(), save_freq=10):
"""
Proximal Policy Optimization (by clipping),
with early stopping based on approximate KL
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp`` (batch,) | Gives log probability, according to
| the policy, of taking actions ``a_ph``
| in states ``x_ph``.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``.
``v`` (batch,) | Gives the value estimate for states
| in ``x_ph``. (Critical: make sure
| to flatten this!)
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to PPO.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs of interaction (equivalent to
number of policy updates) to perform.
gamma (float): Discount factor. (Always between 0 and 1.)
clip_ratio (float): Hyperparameter for clipping in the policy objective.
Roughly: how far can the new policy go from the old policy while
still profiting (improving the objective function)? The new policy
can still go farther than the clip_ratio says, but it doesn't help
on the objective anymore. (Usually small, 0.1 to 0.3.) Typically
denoted by :math:`\epsilon`.
pi_lr (float): Learning rate for policy optimizer.
vf_lr (float): Learning rate for value function optimizer.
train_pi_iters (int): Maximum number of gradient descent steps to take
on policy loss per epoch. (Early stopping may cause optimizer
to take fewer than this.)
train_v_iters (int): Number of gradient descent steps to take on
value function per epoch.
lam (float): Lambda for GAE-Lambda. (Always between 0 and 1,
close to 1.)
max_ep_len (int): Maximum length of trajectory / episode / rollout.
target_kl (float): Roughly what KL divergence we think is appropriate
between new and old policies after an update. This will get used
for early stopping. (Usually small, 0.01 or 0.05.)
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
## Logger setup
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
## Random seed setting
seed += 10000 * proc_id()
tf.set_random_seed(seed)
np.random.seed(seed)
## Environment instantiation
env = env_fn()
obs_dim = env.observation_space.shape
act_dim = env.action_space.shape
# Policies vector (only one for this project)
policies = []
# TensorFlow session
sess = tf.Session()
# Build policy anc value networks
MAP = MAPolicy(scope='policy_0', ob_space=env.observation_space, ac_space=env.action_space, network_spec=pi_specs,
normalize=True, v_network_spec=v_specs)
policies = [MAP]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Create aux placeholders for the computation graph
adv_ph, ret_ph, logp_old_ph = core.placeholders(1, 1, 1)
# Get main placeholders for the computation graph
map_phs_dict = MAP.phs
map_phs = [v for k, v in map_phs_dict.items()]
for k, v in map_phs_dict.items():
        if v.name is None:
v.name = k
# Append aux and main placeholders
# Need placeholders in *this* order later (to zip with data from buffer)
new_phs = [adv_ph, ret_ph, logp_old_ph]
all_phs = np.append(map_phs, new_phs)
# Intantiate Experience buffer
local_steps_per_epoch = int(steps_per_epoch / num_procs())
buf = PPOBuffer(obs_dim, act_dim, local_steps_per_epoch, gamma, lam)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['policy_net', 'vpred_net'])
logger.log('\nNumber of parameters: \t pi: %d, \t v: %d\n' % var_counts)
# PPO objectives
ratio = tf.exp(MAP.taken_action_logp - logp_old_ph) # pi(a|s) / pi_old(a|s)
min_adv = tf.where(adv_ph > 0, (1 + clip_ratio) * adv_ph, (1 - clip_ratio) * adv_ph) # PPO-clip limits
pi_loss = -tf.reduce_mean(tf.minimum(ratio * adv_ph, min_adv)) # Policy loss function
v_loss = tf.reduce_mean((ret_ph - MAP.scaled_value_tensor) ** 2) # Value loss function
# Info (useful to watch during learning)
approx_kl = tf.reduce_mean(
logp_old_ph - MAP.taken_action_logp) # a sample estimate for KL-divergence, easy to compute
approx_ent = tf.reduce_mean(-MAP.taken_action_logp) # a sample estimate for entropy, also easy to compute
clipped = tf.logical_or(ratio > (1 + clip_ratio),
ratio < (1 - clip_ratio)) # a logical value which states whether there was clipping
clipfrac = tf.reduce_mean(tf.cast(clipped, tf.float32)) # a measure of clipping for posterior analysis
# Optimizers
train_pi = MpiAdamOptimizer(learning_rate=pi_lr).minimize(pi_loss) # Policy network optimizer
train_v = MpiAdamOptimizer(learning_rate=vf_lr).minimize(v_loss) # Value network optimizer
# initialize TensorFlow variabels
sess.run(tf.global_variables_initializer())
# Sync params across processes
sess.run(sync_all_params())
# Set up logger variables to be saved (it is necessary to save everything that is
# input/output to the networks so that the policy can be played afterwards during testing)
out_act_dict = MAP.sampled_action
out_state_dict = MAP.state_out
logger_outputs = {**out_act_dict, **out_state_dict}
    # iterate over a copy of the keys, since entries are renamed in place below
    for k in list(logger_outputs.keys()):
if 'lstm' in k:
logger_outputs[k + '_out'] = logger_outputs.pop(k)
logger_inputs = map_phs_dict
logger.setup_tf_saver(sess, inputs=logger_inputs, outputs=logger_outputs)
# ======================================================================== #
# ===================== Auxiliary Training Functions ===================== #
# ======================================================================== #
# Compute metrics for analysis during and after training
def compute_metrics(extra_dict={}):
loss_outs = {'pi_loss': pi_loss,
'v_loss': v_loss,
'approx_ent': approx_ent,
'approx_kl': approx_kl,
'approx_cf': clipfrac,
'taken_action_logp': MAP.taken_action_logp,
'ratio': ratio,
'min_adv': min_adv}
out_loss = policies[0].sess_run(buf.obs_buf,
sess_act=sess,
extra_feed_dict=extra_dict,
other_outputs=loss_outs,
replace=True)
return out_loss['pi_loss'], out_loss['v_loss'], out_loss['approx_ent'], out_loss['approx_kl'], out_loss[
'approx_cf']
# ======================================================================= #
# Run session on policy and value optimizers for training their respective networks
def train(net, extra_dict={}):
if net == 'pi':
train_outs = {'train_pi': train_pi,
'approx_kl': approx_kl}
elif net == 'v':
train_outs = {'train_v': train_v}
else:
print("Error: Network not defined")
return
out_train = policies[0].sess_run(buf.obs_buf,
sess_act=sess,
extra_feed_dict=extra_dict,
other_outputs=train_outs,
replace=True)
if net == 'pi':
return out_train['approx_kl']
# ======================================================================= #
# Perform training procedure
def update():
print("======= update!")
# get aux data from the buffer and match it with its respective placeholders
buf_data = buf.get(aux_vars_only=True)
aux_inputs = {k: v for k, v in zip(new_phs, buf_data)}
# for the training, the actions taken during the experience loop are also inputs to the network
        extra_dict = {k: v for k, v in buf.act_buf.items() if k != 'vpred'}
for k, v in extra_dict.items():
if k == 'action_movement':
extra_dict[k] = np.expand_dims(v, 1)
# actions and aux variables from the buffer are joined and passed to compute_metrics (observations are joined within the functions)
extra_dict.update(aux_inputs)
pi_l_old, v_l_old, ent, kl, cf = compute_metrics(extra_dict)
# Policy training loop
for i in range(train_pi_iters):
if i % 10 == 0:
print("training pi iter ", i)
kl = train('pi', extra_dict)
kl = mpi_avg(kl)
if kl > 1.5 * target_kl:
logger.log('Early stopping at step %d due to reaching max kl.' % i)
break
logger.store(StopIter=i)
print("")
# Value training loop
for j in range(train_v_iters):
if j % 10 == 0:
print("training v iter ", j)
train('v', extra_dict)
# Log changes from update with a new run on compute_metrics
pi_l_new, v_l_new, ent, kl, cf = compute_metrics(extra_dict)
# Store information
logger.store(LossPi=pi_l_old, LossV=v_l_old,
KL=kl, Entropy=ent, ClipFrac=cf,
DeltaLossPi=(pi_l_new - pi_l_old),
DeltaLossV=(v_l_new - v_l_old))
# Reset experience varibales
o, ep_ret, ep_len = env.reset(), 0, 0
# Reset policy
for policy in policies:
policy.reset()
print("======= update finished!")
# ======================================================================= #
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# ======================================================================= #
# ========================== Experience Loop ============================ #
# ======================================================================= #
# Main loop: collect experience in env and update/log each epoch
for epoch in range(epochs):
print("epoch: ", epoch)
for t in range(local_steps_per_epoch):
# Pass observations through networs and get action + predicted value
if len(policies) == 1: # this project's case
a, info = policies[0].sess_run(o, sess_act=sess)
v_t = info['vpred']
logp_t = info['ac_logp']
else:
o = splitobs(o, keepdims=False)
ob_policy_idx = np.split(np.arange(len(o)), len(policies))
actions = []
for i, policy in enumerate(policies):
inp = operator.itemgetter(*ob_policy_idx[i])(o)
inp = listdict2dictnp([inp] if ob_policy_idx[i].shape[0] == 1 else inp)
ac, info = policy.act(inp)
actions.append(ac)
action = listdict2dictnp(actions, keepdims=True)
# Take a step in the environment
o2, r, d, env_info = env.step(a)
ep_ret += r
ep_len += 1
# If env.render is uncommented, the experience loop is displayed (visualized)
# in real time (much slower, but excelent debugging)
# env.render()
# save experience in buffer and log
buf.store(o, a, r, v_t, logp_t)
logger.store(VVals=v_t)
# Update obs (critical!)
o = o2
# Treat the end of a trajectory
terminal = d or (ep_len == max_ep_len)
if terminal or (t == local_steps_per_epoch - 1) or env_info.get('discard_episode', False):
if not (terminal):
print('Warning: trajectory cut off by epoch at %d steps.' % ep_len)
# if trajectory didn't reach terminal state, bootstrap value target
if d:
last_val = 0
else:
_, info = policies[0].sess_run(o, sess_act=sess)
last_val = info['vpred']
# Compute advantage estimates and rewards-to-go
buf.finish_path(last_val)
if terminal:
# only save EpRet / EpLen if trajectory finished
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
for policy in policies:
policy.reset()
# Save model
if (epoch % save_freq == 0) or (epoch == epochs - 1):
print("Saved epoch: ", epoch)
logger.save_state({'env': env}, None)
# Perform PPO update!
update()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('VVals', with_min_and_max=True)
logger.log_tabular('TotalEnvInteracts', (epoch + 1) * steps_per_epoch)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossV', average_only=True)
logger.log_tabular('DeltaLossPi', average_only=True)
logger.log_tabular('DeltaLossV', average_only=True)
logger.log_tabular('Entropy', average_only=True)
logger.log_tabular('KL', average_only=True)
logger.log_tabular('ClipFrac', average_only=True)
logger.log_tabular('StopIter', average_only=True)
logger.log_tabular('Time', time.time() - start_time)
logger.dump_tabular()
# ==================================================================================================== #
# ==================================================================================================== #
# ==================================================================================================== #
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--hid', type=int, default=64)
parser.add_argument('--l', type=int, default=2)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--cpu', type=int, default=4)
parser.add_argument('--steps', type=int, default=4000)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='ppo')
args = parser.parse_args()
mpi_fork(args.cpu) # run parallel code with mpi
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
ppo(lambda: gym.make(args.env), actor_critic=core.mlp_actor_critic,
ac_kwargs=dict(hidden_sizes=[args.hid] * args.l), gamma=args.gamma,
seed=args.seed, steps_per_epoch=args.steps, epochs=args.epochs,
logger_kwargs=logger_kwargs)
``` |
{
"source": "jihan1218/shepherd_gym",
"score": 2
} |
#### File: shepherd_gym/examples/shepherd_curriculum.py
```python
import gym
import argparse
import numpy as np
import shepherd_gym
# import stable-baselines utilities
from stable_baselines import PPO2
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy,MlpLstmPolicy
def main():
parser = argparse.ArgumentParser(description='PPO baseline implementation')
parser.add_argument('-e', '--experiment', type=str, default='ppo_test',
help='name of experiment')
parser.add_argument('-w', '--env', type=str, default='Shepherd-v0',
help='name of gym environment')
parser.add_argument('-m', '--mode', type=str, default='train',
help='mode to run experiment')
parser.add_argument('-p', '--policy', type=str, default='mlp',
help='type of policy network')
parser.add_argument('-t', '--timesteps', type=int, default=10000,
help='number of timesteps to train')
parser.add_argument('-d', '--datapath', type=str, default='../data',
help='path to save results')
args = parser.parse_args()
mode = args.mode
env_name = args.env
policy = args.policy
data_path = args.datapath
timesteps = args.timesteps
experiment = args.experiment
exp_path = '{}/{}'.format(data_path,experiment)
log_path = '{}/log_{}'.format(exp_path, timesteps)
model_path = '{}/model_{}'.format(exp_path, timesteps)
env = gym.make(env_name)
env = shepherd_gym.wrappers.SamplerWrapper(env,
demo_path='../data/curriculum',
increment_freq=250)
env = DummyVecEnv([lambda: env])
if policy=='mlp':
policy_type = MlpPolicy
else:
policy_type = MlpLstmPolicy
model = PPO2(policy_type, env, verbose=1,
tensorboard_log=log_path, nminibatches=1)
if mode == 'train':
model.learn(total_timesteps=timesteps)
model.save(model_path)
else:
model.load(model_path)
env.render()
obs = env.reset()
for _ in range(1000):
action, _states = model.predict(obs)
obs, _, _, _ = env.step(action)
env.render()
# complete simulation
env.close()
if __name__=='__main__':
main()
```
#### File: shepherd_gym/envs/shepherd_env.py
```python
import warnings
warnings.filterwarnings("ignore")
# ipython debugging
from IPython.terminal.debugger import set_trace as keyboard
# core modules
import gym
import random
import numpy as np
from gym import spaces
import matplotlib.pyplot as plt
class ShepherdEnv(gym.Env):
"""
Define the shepherding environment.
The environment treats the dog as the agent
and the sheep as a part of the environment.
State:
1) Position of center of mass (x,y)
2) Position of farthest sheep (x,y)
3) Position of target (x,y)
4) Position of dog (x,y)
5) Radius of sheep (r)
6) Distance to target (d)
Action:
1) Increment in position of dog (x,y)
Reward:
1) Negative of farthest sheep distance to com (d_f)
2) Negative of com distance to target (d_t)
"""
def __init__(self, continuous=False, num_sheep=25, info_mode=0,
fixed_reset=False, sparse_reward=False):
# initialize observation space
obs_low = np.array(10*[-1000.0])
obs_high = np.array(10*[1000.0])
self.observation_space = spaces.Box(low=obs_low, high=obs_high)
# setup environment type
self.continuous = continuous
# initialize action space
if self.continuous:
act_low = np.array([-np.pi])
act_high = np.array([np.pi])
self.action_space = spaces.Box(low=act_low, high=act_high)
else:
self.action_space = spaces.Discrete(8)
# limit episode length
self.max_steps = 500
# conditions to terminate
self.boundary = 400.0
self.max_radius = 100.0
self.max_distance = 400.0
# create buffer and episode variable
self.curr_step = -1
self.curr_episode = -1
# radius for sheep to be considered as collected by dog
self.dog_collect_radius = 2.0
# parameters for initialization
self.init_sheep_root = 200.0
self.init_sheep_range = 50.0
self.init_dog_distance = 60.0
# weight multipliers for sheep forces
self.com_term = 1.05
self.noise_term = 0.3
self.inertia_term = 0.5
self.repulsion_dog_term = 1.0
self.repulsion_sheep_term = 2.0
# constants used to update environment
self.delta_sheep_pose = 1.0
self.dog_repulsion_dist = 70.0
self.sheep_repulsion_dist = 2.0
# assign number of sheep
self.num_sheep = num_sheep
# flag to show simulation, false by default
self.info_mode = info_mode
self.fixed_reset = fixed_reset
# info variables
self.episode_length = 0.0
self.episode_reward = 0.0
# flag for sparse reward
self.sparse_reward = sparse_reward
# initialize plot figure
self.fig = None
def step(self, action):
"""
The dog takes a step in the environment
Parameters
----------
action : float array
Returns
-------
ob, reward, episode_over, info : tuple
observation (float array) :
observation after dog position is updated.
reward (float) :
amount of reward achieved by dog in the previous step.
episode_over (bool) :
flag that indicates if the environment is reset or not.
info (dict) :
useful information about the environment for debugging.
"""
success = False
self.curr_step += 1
        # the same action is applied three times per environment step
        self._take_action(action)
        self._take_action(action)
        self._take_action(action)
# initialize reward and get state
reward = 0.0
ob = self._get_state()
# give dense rewards
if not self.sparse_reward:
reward = self._get_reward()
# bad terminal conditions
if self.curr_step >= self.max_steps \
or self.target_distance >= self.max_distance \
or self.mean_radius_sheep >= self.max_radius:
self.finish = True
if self.sparse_reward:
reward = -1.0
# good terminal conditions
if self.target_distance <= 1.0:
success = True
self.finish = True
if self.sparse_reward:
reward = 1.0
# update rl parameters
self.episode_length += 1
self.episode_reward += reward
# generate info return parameter
if self.info_mode == 1 and self.finish:
info = {'r':self.episode_reward, 'l':self.episode_length,
's': success}
else:
info = {'n':self.num_sheep, 's': success}
return ob, reward, self.finish, info
def reset(self):
"""
Reset the environment and return the init state
Returns
-------
observation (float array) : initial observation after reset.
"""
# initialize gym env variables
self.finish = False
self.curr_step = -1
self.curr_episode += 1
# initialize target position
self.target = np.random.uniform(-10.0,10.0,size=(2))
# initialize sheep positions
if self.fixed_reset:
init_sheep_pose = np.array([75.0, 75.0])
self.sheep_poses = (np.random.uniform(-50.0, 50.0,
size=(self.num_sheep,2))) + init_sheep_pose[None,:]
else:
init_sheep_pose = np.random.uniform(-self.init_sheep_root,
self.init_sheep_root, size=(2))
self.sheep_poses = (np.random.uniform(-self.init_sheep_range,
self.init_sheep_range, size=(self.num_sheep,2))) \
+ init_sheep_pose[None,:]
self.sheep_com = self.sheep_poses.mean(axis=0)
# get the farthest sheep and radius of the sheep
dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)
self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]
self.radius_sheep = np.array([np.max(dist_to_com)])
# update distance to target
self.target_distance = np.linalg.norm(self.target - self.sheep_com)
# initialize values for reward estimation
self.init_radius_sheep = self.radius_sheep
self.init_target_distance = self.target_distance
# initialize dog position
if self.fixed_reset:
init_dog_pose = np.array([0.0,75.0])
else:
init_theta = np.random.uniform(-np.pi,np.pi)
init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta),
np.sin(init_theta)])
self.dog_pose = init_dog_pose
# initialize inertia
self.inertia = np.ones((self.num_sheep, 2))
# initialize episode reward and length
self.episode_reward = 0
self.episode_length = 0
# get the state, reward, finish, info
state = self._get_state()
return state
def reset_from_state(self, state):
"""
Reset the environment from given state
Returns
-------
observation (float array) : initial observation after reset.
"""
# initialize gym env variables
self.finish = False
self.curr_step = -1
self.curr_episode += 1
# initialize target position
self.target = state[4:6]
# initialize sheep com
self.sheep_com = state[0:2]
# get the farthest sheep and radius of the sheep
self.farthest_sheep = state[2:4]
self.radius_sheep = np.array([state[8]])
# update distance to target
self.target_distance = np.array([state[9]])
# initialize sheep position
self.sheep_poses = (np.random.uniform(-0.75*self.radius_sheep,
0.75*self.radius_sheep, size=(self.num_sheep,2))) \
+ self.sheep_com[None,:]
rnd_ind = np.random.choice(self.num_sheep)
self.sheep_poses[rnd_ind,:] = state[2:4]
# initialize values for reward estimation
self.init_radius_sheep = self.radius_sheep
self.init_target_distance = self.target_distance
# initialize dog position
init_dog_pose = state[6:8]
self.dog_pose = init_dog_pose
# initialize inertia
self.inertia = np.ones((self.num_sheep, 2))
# initialize episode reward and length
self.episode_reward = 0
self.episode_length = 0
# get the state, reward, finish, info
state = self._get_state()
return state
def close(self):
"""Clean exit for environment"""
if self.fig:
plt.close('all')
plt.ioff()
def seed(self, seed):
"""Function to set the seed of env"""
random.seed(seed)
np.random.seed(seed)
def _take_action(self, action):
"""Update position of dog based on action and env"""
if isinstance(action, list) or isinstance(action, np.ndarray):
action = action[0]
if self.continuous:
increment = np.array([1.5*np.cos(action),1.5*np.sin(action)])
else:
increment = np.array([0.0,0.0])
if action == 0:
increment[0] = 1.5
elif action == 1:
increment[0] = 1.225
increment[1] = 1.225
elif action == 2:
increment[1] = 1.5
elif action == 3:
increment[0] = -1.225
increment[1] = 1.225
elif action == 4:
increment[0] = -1.5
elif action == 5:
increment[0] = -1.225
increment[1] = -1.225
elif action == 6:
increment[1] = -1.5
elif action == 7:
increment[0] = 1.225
increment[1] = -1.225
else:
print('NOP!')
self.dog_pose += increment
self._update_environment()
def _update_environment(self):
"""Update environment based on new position of dog"""
# compute a distance matrix
distance_matrix = np.zeros((self.num_sheep,self.num_sheep))
for i in range(self.num_sheep):
for j in range(i):
dist = np.linalg.norm(self.sheep_poses[i,:] - self.sheep_poses[j,:])
distance_matrix[i,j] = dist
distance_matrix[j,i] = dist
# find the sheep which are within 2 meters distance
xvals, yvals = np.where((distance_matrix < self.sheep_repulsion_dist) & (distance_matrix != 0))
interact = np.hstack((xvals[:,None],yvals[:,None]))
# compute the repulsion forces within sheep
repulsion_sheep = np.zeros((self.num_sheep,2))
for val in range(self.num_sheep):
iv = interact[interact[:,0] == val,1]
transit = self.sheep_poses[val,:][None,:] - self.sheep_poses[iv,:]
transit /= np.linalg.norm(transit, axis=1, keepdims=True)
repulsion_sheep[val,:] = np.sum(transit, axis=0)
repulsion_sheep /= np.linalg.norm(repulsion_sheep, axis=1, keepdims=True)
repulsion_sheep[np.isnan(repulsion_sheep)] = 0
# find sheep near dog
dist_to_dog = np.linalg.norm((self.sheep_poses - self.dog_pose[None,:]), axis=1)
sheep_inds = np.where(dist_to_dog < self.dog_repulsion_dist)
near_sheep = sheep_inds[0]
# repulsion from dog
repulsion_dog = np.zeros((self.num_sheep,2))
repulsion_dog[near_sheep,:] = self.sheep_poses[near_sheep,:] - self.dog_pose[None,:]
repulsion_dog /= np.linalg.norm(repulsion_dog, axis=1, keepdims=True)
repulsion_dog[np.isnan(repulsion_dog)] = 0
# attraction to COM
attraction_com = np.zeros((self.num_sheep,2))
attraction_com[near_sheep,:] = self.sheep_com[None,:] - self.sheep_poses[near_sheep,:]
attraction_com /= np.linalg.norm(attraction_com, axis=1, keepdims=True)
attraction_com[np.isnan(attraction_com)] = 0
# error term
noise = np.random.randn(self.num_sheep,2)
noise /= np.linalg.norm(noise, axis=1, keepdims=True)
# compute sheep motion direction
self.inertia = self.inertia_term*self.inertia + self.com_term*attraction_com + \
self.repulsion_sheep_term*repulsion_sheep + self.repulsion_dog_term*repulsion_dog + \
self.noise_term*noise
# normalize the inertia terms
self.inertia /= np.linalg.norm(self.inertia, axis=1, keepdims=True)
self.inertia[np.isnan(self.inertia)] = 0
# find new sheep position
self.sheep_poses += self.delta_sheep_pose*self.inertia
self.sheep_com = np.mean(self.sheep_poses,axis=0)
# get the farthest sheep and radius of the sheep
dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)
self.radius_sheep = np.array([np.max(dist_to_com)])
self.mean_radius_sheep = np.array([np.mean(dist_to_com)])
self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]
# update distance to target
self.target_distance = np.linalg.norm(self.target - self.sheep_com)
def _get_reward(self):
"""Return reward based on action of the dog"""
# compute reward depending on the radius and distance to target
radius_reward = -(self.radius_sheep*0.9)/self.init_sheep_root
target_reward = -(self.target_distance*0.9)/self.init_sheep_root
reward = target_reward + radius_reward
# ensure it is always an array
if not type(reward) is np.ndarray:
reward = np.array([reward])
return reward[0]
def _get_state(self):
"""Return state based on action of the dog"""
# stack all variables and return state array
state = np.hstack((self.sheep_com, self.farthest_sheep,
self.target, self.dog_pose, self.radius_sheep,
self.target_distance))
return state
def render(self, mode='default', subgoal=None):
if self.curr_step%5 == 0:
if not self.fig:
# create a figure
self.fig = plt.figure()
plt.ion()
plt.show()
plt.clf()
theta = np.linspace(0.0,2*np.pi, num=100)
plt.plot(self.boundary*np.cos(theta), self.boundary*np.sin(theta),
'-k', linewidth=3)
plt.scatter(self.target[0], self.target[1],
c='g', s=40, label='Goal')
plt.scatter(self.dog_pose[0], self.dog_pose[1],
c='r', s=50, label='Dog')
plt.scatter(self.sheep_poses[:,0], self.sheep_poses[:,1],
c='b', s=50, label='Sheep')
if mode == 'detailed':
plt.scatter(subgoal[0], subgoal[1],
c='m', s=75, label='Int Goal')
plt.title('Shepherding')
plt.xlim([-self.boundary,self.boundary])
plt.ylim([-self.boundary,self.boundary])
plt.legend()
plt.draw()
plt.pause(0.01)
return
```
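A minimal random-policy rollout against the environment above might look like the sketch below; it assumes the package is installed and that importing `shepherd_gym` registers the `Shepherd-v0` id used by the curriculum script earlier.
```python
import gym
import shepherd_gym  # assumed to register the Shepherd environments

env = gym.make('Shepherd-v0')
obs = env.reset()
done = False
total_reward = 0.0
while not done:
    # sample a random discrete heading for the dog and step the herd
    obs, reward, done, info = env.step(env.action_space.sample())
    total_reward += reward
env.close()
print('episode reward:', total_reward)
```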
#### File: shepherd_gym/wrappers/demo_sampler.py
```python
import os
import pickle
import numpy as np
from shepherd_gym.wrappers import Wrapper
class SamplerWrapper(Wrapper):
env = None
def __init__(self, env, demo_path,
increment_freq=100,
initial_window_width=10,
window_increment=10):
# inherit from base wrapper class
super().__init__(env)
# load demo dataset
self.demo_path = demo_path
with open('{}/curriculum.npz'.format(self.demo_path),'rb') as f:
self.demo_data = pickle.load(f)
# number of trajectories
self.num_traj = len(self.demo_data)
# initialize number of demos sampled
self.demo_sampled = 0
# initialize sampling variables
self.increment_freq = increment_freq
self.window_size = initial_window_width
self.window_increment = window_increment
def reset(self):
# get a state sample
state = self.sample()
return self.env.reset_from_state(state)
def sample(self):
# get a random episode index
ep_ind = np.random.choice(self.num_traj)
states = self.demo_data[ep_ind]
# sample uniformly
eps_len = states.shape[0]
index = np.random.randint(max(eps_len - self.window_size, 0), eps_len)
state = states[index]
# increment window size
self.demo_sampled += 1
if self.demo_sampled >= self.increment_freq:
if self.window_size < eps_len:
self.window_size += self.window_increment
self.demo_sampled = 0
# return the state
return state
``` |
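The wrapper above unpickles `curriculum.npz` and indexes it per episode, so the demonstration data is presumably a pickled sequence of per-episode state arrays whose rows match the 10-dimensional environment state. A sketch of writing such a file with placeholder data (the path and episode sizes are arbitrary):
```python
import pickle
import numpy as np

# placeholder demonstrations: 5 episodes of 200 states, 10 values per state
episodes = [np.random.randn(200, 10) for _ in range(5)]
with open('../data/curriculum/curriculum.npz', 'wb') as f:
    pickle.dump(episodes, f)
```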
{
"source": "jihang-zhang/acoustic_word_embeds_gated_cnn",
"score": 3
} |
#### File: acoustic_word_embeds_gated_cnn/code/average_precision.py
```python
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
# --
from scipy.special import comb
from scipy.spatial.distance import pdist
import numpy as np
def average_precision(data, labels):
"""
Calculate average precision and precision-recall breakeven, and return
the average precision / precision-recall breakeven calculated
using `same_dists` and `diff_dists`.
-------------------------------------------------------------------
returns average_precision, precision-recall break even : (float, float)
"""
num_examples = len(labels)
num_pairs = int(comb(num_examples, 2))
# build up binary array of matching examples
    matches = np.zeros(num_pairs, dtype=bool)
i = 0
for n in range(num_examples):
j = i + num_examples - n - 1
matches[i:j] = (labels[n] == labels[n + 1:]).astype(np.int32)
i = j
num_same = np.sum(matches)
# calculate pairwise distances and sort matches
dists = pdist(data, metric="cosine")
matches = matches[np.argsort(dists)]
# calculate precision, average precision, and recall
precision = np.cumsum(matches) / np.arange(1, num_pairs + 1)
average_precision = np.sum(precision * matches) / num_same
recall = np.cumsum(matches) / num_same
# multiple precisions can be at single recall point, take max
for n in range(num_pairs - 2, -1, -1):
precision[n] = max(precision[n], precision[n + 1])
# calculate precision-recall breakeven
prb_ix = np.argmin(np.abs(recall - precision))
prb = (recall[prb_ix] + precision[prb_ix]) / 2.
return average_precision
``` |
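A quick self-contained check of the function above with synthetic embeddings (the import path and the shapes are illustrative only):
```python
import numpy as np
from average_precision import average_precision  # module path assumed

# six 16-dimensional embeddings drawn from two word classes
data = np.random.randn(6, 16)
labels = np.array([0, 0, 0, 1, 1, 1])
print('average precision:', average_precision(data, labels))
```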
{
"source": "jihao/traccar-cn-hass",
"score": 2
} |
#### File: custom_components/traccar_cn/device_tracker.py
```python
from datetime import datetime, timedelta
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_SSL, CONF_VERIFY_SSL,
CONF_PASSWORD, CONF_USERNAME, CONF_SCAN_INTERVAL,
CONF_MONITORED_CONDITIONS, CONF_EVENT)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util import slugify
from .const import (
ATTR_ADDRESS, ATTR_CATEGORY, ATTR_GEOFENCE,
ATTR_MOTION, ATTR_SPEED, ATTR_TRACKER, ATTR_TRACCAR_ID, ATTR_STATUS,
EVENT_DEVICE_MOVING, EVENT_COMMAND_RESULT, EVENT_DEVICE_FUEL_DROP,
EVENT_GEOFENCE_ENTER, EVENT_DEVICE_OFFLINE, EVENT_DRIVER_CHANGED,
EVENT_GEOFENCE_EXIT, EVENT_DEVICE_OVERSPEED, EVENT_DEVICE_ONLINE,
EVENT_DEVICE_STOPPED, EVENT_MAINTENANCE, EVENT_ALARM, EVENT_TEXT_MESSAGE,
EVENT_DEVICE_UNKNOWN, EVENT_IGNITION_OFF, EVENT_IGNITION_ON,
EVENT_ALL_EVENTS, CONF_MAX_ACCURACY, CONF_SKIP_ACCURACY_ON)
from .helper import gcj02towgs84
_LOGGER = logging.getLogger(__name__)
DEFAULT_SCAN_INTERVAL = timedelta(seconds=30)
SCAN_INTERVAL = DEFAULT_SCAN_INTERVAL
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=8082): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(CONF_MAX_ACCURACY, default=0): vol.All(vol.Coerce(int),
vol.Range(min=0)),
vol.Optional(CONF_SKIP_ACCURACY_ON,
default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_MONITORED_CONDITIONS,
default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EVENT,
default=[]): vol.All(cv.ensure_list,
[vol.Any(EVENT_DEVICE_MOVING,
EVENT_COMMAND_RESULT,
EVENT_DEVICE_FUEL_DROP,
EVENT_GEOFENCE_ENTER,
EVENT_DEVICE_OFFLINE,
EVENT_DRIVER_CHANGED,
EVENT_GEOFENCE_EXIT,
EVENT_DEVICE_OVERSPEED,
EVENT_DEVICE_ONLINE,
EVENT_DEVICE_STOPPED,
EVENT_MAINTENANCE,
EVENT_ALARM,
EVENT_TEXT_MESSAGE,
EVENT_DEVICE_UNKNOWN,
EVENT_IGNITION_OFF,
EVENT_IGNITION_ON,
EVENT_ALL_EVENTS)]),
})
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Validate the configuration and return a Traccar scanner."""
from pytraccar.api import API
session = async_get_clientsession(hass, config[CONF_VERIFY_SSL])
api = API(hass.loop, session, config[CONF_USERNAME], config[CONF_PASSWORD],
config[CONF_HOST], config[CONF_PORT], config[CONF_SSL])
scanner = TraccarScanner(
api, hass, async_see,
config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL),
config[CONF_MAX_ACCURACY], config[CONF_SKIP_ACCURACY_ON],
config[CONF_MONITORED_CONDITIONS], config[CONF_EVENT])
return await scanner.async_init()
class TraccarScanner:
"""Define an object to retrieve Traccar data."""
def __init__(self, api, hass, async_see, scan_interval, max_accuracy,
skip_accuracy_on, custom_attributes, event_types):
"""Initialize."""
from stringcase import camelcase
self._event_types = {camelcase(evt): evt for evt in event_types}
self._custom_attributes = custom_attributes
self._scan_interval = scan_interval
self._async_see = async_see
self._api = api
self.connected = False
self._hass = hass
self._max_accuracy = max_accuracy
self._skip_accuracy_on = skip_accuracy_on
async def async_init(self):
"""Further initialize connection to Traccar."""
await self._api.test_connection()
if self._api.connected and not self._api.authenticated:
_LOGGER.error("Authentication for Traccar failed")
return False
await self._async_update()
async_track_time_interval(self._hass,
self._async_update,
self._scan_interval)
return True
async def _async_update(self, now=None):
"""Update info from Traccar."""
if not self.connected:
_LOGGER.debug('Testing connection to Traccar')
await self._api.test_connection()
self.connected = self._api.connected
if self.connected:
_LOGGER.info("Connection to Traccar restored")
else:
return
_LOGGER.debug('Updating device data')
await self._api.get_device_info(self._custom_attributes)
self._hass.async_create_task(self.import_device_data())
if self._event_types:
self._hass.async_create_task(self.import_events())
self.connected = self._api.connected
async def import_device_data(self):
"""Import device data from Traccar."""
for device_unique_id in self._api.device_info:
device_info = self._api.device_info[device_unique_id]
device = None
attr = {}
skip_accuracy_filter = False
attr[ATTR_TRACKER] = 'traccar'
if device_info.get('address') is not None:
attr[ATTR_ADDRESS] = device_info['address']
if device_info.get('geofence') is not None:
attr[ATTR_GEOFENCE] = device_info['geofence']
if device_info.get('category') is not None:
attr[ATTR_CATEGORY] = device_info['category']
if device_info.get('speed') is not None:
attr[ATTR_SPEED] = device_info['speed']
if device_info.get('motion') is not None:
attr[ATTR_MOTION] = device_info['motion']
if device_info.get('traccar_id') is not None:
attr[ATTR_TRACCAR_ID] = device_info['traccar_id']
for dev in self._api.devices:
if dev['id'] == device_info['traccar_id']:
device = dev
break
if device is not None and device.get('status') is not None:
attr[ATTR_STATUS] = device['status']
for custom_attr in self._custom_attributes:
if device_info.get(custom_attr) is not None:
attr[custom_attr] = device_info[custom_attr]
if custom_attr in self._skip_accuracy_on:
skip_accuracy_filter = True
accuracy = 0.0
if device_info.get('accuracy') is not None:
accuracy = device_info['accuracy']
if (not skip_accuracy_filter and self._max_accuracy > 0 and
accuracy > self._max_accuracy):
_LOGGER.debug('Excluded position by accuracy filter: %f (%s)',
accuracy, attr[ATTR_TRACCAR_ID])
continue
coords = gcj02towgs84(device_info.get('longitude'), device_info.get('latitude'))
await self._async_see(
dev_id=slugify(device_info['device_id']),
gps=(coords[1], coords[0]), # device_info.get('latitude'), device_info.get('longitude')
gps_accuracy=accuracy,
battery=device_info.get('battery'),
attributes=attr)
async def import_events(self):
"""Import events from Traccar."""
device_ids = [device['id'] for device in self._api.devices]
end_interval = datetime.utcnow()
start_interval = end_interval - self._scan_interval
events = await self._api.get_events(
device_ids=device_ids,
from_time=start_interval,
to_time=end_interval,
event_types=self._event_types.keys())
if events is not None:
for event in events:
device_name = next((
dev.get('name') for dev in self._api.devices
if dev.get('id') == event['deviceId']), None)
self._hass.bus.async_fire(
'traccar_' + self._event_types.get(event["type"]), {
'device_traccar_id': event['deviceId'],
'device_name': device_name,
'type': event['type'],
'serverTime': event['serverTime'],
'attributes': event['attributes']
})
``` |
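The position fix-up above relies on a `gcj02towgs84` helper from the integration's `helper` module, which is not shown here. Judging from how it is called, it takes a GCJ-02 longitude/latitude pair and returns an indexable `[lon, lat]` in WGS-84; a usage sketch with hypothetical coordinates:
```python
from custom_components.traccar_cn.helper import gcj02towgs84  # import path assumed

# hypothetical GCJ-02 coordinates (longitude, latitude)
coords = gcj02towgs84(116.397, 39.909)
print('WGS-84 lat/lon:', coords[1], coords[0])
```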
{
"source": "Jihaoyun/gem5",
"score": 3
} |
#### File: configs/fault_injector/ControlFaultParser.py
```python
import re
import sys
class Node:
def __init__(self, nodeType, nodeValue):
self.left = None
self.right = None
self.id = -1
self.nodeType = nodeType
self.nodeValue = nodeValue
class ControlFaultEntry:
trigger = None
action = None
def __init__(self, trigger, action):
self.trigger = trigger
self.action = action
class ControlFaultParser:
def __init__(self, fileName):
try:
self.faultsFile = open(fileName, "r")
except IOError:
raise
def hasNext(self):
# Read trigger string
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
while self.currentLine[0] == '#':
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
self.currentTriggerLine = self.currentLine
# Read action line
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
while self.currentLine[0] == '#':
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
self.currentActionLine = self.currentLine
return True
def next(self):
if self.currentLine[0] == '#':
return None
return ControlFaultEntry(
self.parseTriggerString(self.currentTriggerLine),
self.parseActionString(self.currentActionLine)
)
def clean(self, string):
string = string.replace(" ", "")
string = string.replace("\n", "")
strList = list(string)
if(strList[0] == '(' and strList[len(string)-1] == ')'):
strList[0] = ' '
strList[len(string)-1] = ' '
return ''.join(strList).replace(" ", "")
def parseTrigger(self, string):
string = self.clean(string)
opsre = r".*(&|\||\<|\>|=|\!).*"
#Check if final case
if re.match(opsre, string) is None:
if string == "index":
return Node("i", string)
else:
return Node("c", str(int(string, 16)))
#Find most extern operator
counter = 0;
for i in range(len(string)):
if re.match(opsre, string[i]) is not None \
and counter == 0:
op = string[i]
if(op == '!'):
if string[i+1] != '(':
sys.exit("Missing '(' After '!'")
tmpNode = Node("o", op)
tmpNode.right = \
self.parseTrigger(self.clean(string[(i+1):]))
else:
if(string[i+1] == '=' or \
string[i+1] == '&' or \
string[i+1] == '|'):
i+=1
op += string[i]
leftString = string[:(i-1)]
else:
leftString = string[:(i)]
rightString = string[(i+1):]
tmpNode = Node("o", op)
tmpNode.left = self.parseTrigger(self.clean(leftString))
tmpNode.right = self.parseTrigger(self.clean(rightString))
return tmpNode
else:
if(string[i] == '('):
counter += 1
if(string[i] == ')'):
counter -= 1
def parseAction(self, string):
#Check if final case
if re.match(r".*[&|^~<>].*", string) is None:
if string == "index":
return Node("i", string)
else:
return Node("c", str(int(string, 16)))
#Find most extern operator
counter = 0;
for i in range(len(string)):
if re.match(r".*[&|^~<>].*", string[i]) is not None \
and counter == 0:
op = string[i]
if(op == '~'):
if string[i+1] != '(':
sys.exit("Missing '(' After '~'")
tmpNode = Node("o", op)
tmpNode.right = \
self.parseAction(self.clean(string[(i+1):]))
else:
if(string[i+1] == '<' or string[i+1] == '>'):
i+=1
op += string[i]
leftString = string[:(i-1)]
else:
leftString = string[:(i)]
rightString = string[(i+1):]
tmpNode = Node("o", op)
tmpNode.left = self.parseAction(self.clean(leftString))
tmpNode.right = self.parseAction(self.clean(rightString))
return tmpNode
else:
if(string[i] == '('):
counter += 1
if(string[i] == ')'):
counter -= 1
def visit(self, n):
self.nodeString += str(n.id) + " " + n.nodeType + " " + \
n.nodeValue + " "
if n.left is not None:
self.edgeCount += 1
self.nodeCount += 1
n.left.id = self.nodeCount
self.edgeString += str(n.id) + " " + str(n.left.id) + " "
self.visit(n.left)
if n.right is not None:
self.edgeCount += 1
self.nodeCount += 1
n.right.id = self.nodeCount
self.edgeString += str(n.id) + " " + str(n.right.id) + " "
self.visit(n.right)
def parseTriggerString(self, string):
self.nodeCount = 0
self.edgeCount = 0
self.nodeString = ""
self.edgeString = ""
rootNode = self.parseTrigger(string)
rootNode.id = 0
self.visit(rootNode)
self.nodeCount += 1
return str(self.nodeCount) + " " + str(self.edgeCount) + " " \
+ self.nodeString + " " + self.edgeString
def parseActionString(self, string):
self.nodeCount = 0
self.edgeCount = 0
self.nodeString = ""
self.edgeString = ""
rootNode = self.parseAction(string)
rootNode.id = 0
self.visit(rootNode)
self.nodeCount += 1
return str(self.nodeCount) + " " + str(self.edgeCount) + " " \
+ self.nodeString + " " + self.edgeString
#Examples
if __name__ == "__main__":
p = ControlFaultParser("control_fault.txt")
n = 1
while p.hasNext():
cfe = p.next()
print cfe.trigger
print cfe.action
```
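The `__main__` block above expects a `control_fault.txt` containing alternating trigger and action lines. From the two parse routines, a trigger is a boolean expression over `index` and hex constants, and an action is a bitwise expression; the pair below is a hypothetical example written in that inferred format:
```python
# write a hypothetical trigger/action pair and parse it back
with open('control_fault.txt', 'w') as f:
    f.write('(index==0x3)\n')  # trigger: fire when the entry index equals 3
    f.write('index^0x1\n')     # action: flip bit 0 of the value
p = ControlFaultParser('control_fault.txt')
while p.hasNext():
    cfe = p.next()
    print(cfe.trigger)
    print(cfe.action)
```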
#### File: configs/fault_injector/FaultParser.py
```python
class FaultEntry:
def __init__(self, label, stuckBit, field, entry,
bitPosition, tickBegin, tickEnd):
self.label = label
self.stuckBit = stuckBit
self.field = field
self.entry = entry
self.bitPosition = bitPosition
self.permanent = (int(tickBegin) == 0 and int(tickEnd) == -1)
self.tickBegin = tickBegin
self.tickEnd = tickEnd
def __str__(self):
return ("Label: " + self.label + "\n" +
"StuckBit: " + str(self.stuckBit) + "\n" +
"Field: " + str(self.field) + "\n" +
"Entry: " + str(self.entry) + "\n" +
"BitPosition: " + str(self.bitPosition) + "\n" +
"Permanent: " + str(self.permanent) + "\n" +
"TickBegin: " + str(self.tickBegin) + "\n" +
"TickEnd: " + str(self.tickEnd) + "\n")
class FaultParser:
def __init__(self, fileName):
try:
self.faultsFile = open(fileName, "r")
except IOError:
raise
def hasNext(self):
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
while self.currentLine[0] == '#' or\
len(self.currentLine.split(":")) < 2:
self.currentLine = self.faultsFile.readline()
if self.currentLine == '':
return False
return True
def next(self):
if self.currentLine[0] == '#' or len(self.currentLine.split(":")) < 2:
return None
# Delete all whitespaces
line = "".join(self.currentLine.strip().replace(" ", "").split())
label = line.split(":")[0]
entries = line.split(":")[1].split(",")
return FaultEntry(
label,
entries[0],
entries[1],
entries[2],
entries[3],
entries[4],
entries[5])
```
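`FaultParser` reads lines of the form `LABEL: stuckBit, field, entry, bitPosition, tickBegin, tickEnd`, skipping comment lines that start with `#`. A small sketch with placeholder field values:
```python
# write a single placeholder fault description and parse it back
with open('faults.txt', 'w') as f:
    f.write('# register file faults\n')
    f.write('FAULT0: 1, 0, 5, 3, 0, -1\n')
parser = FaultParser('faults.txt')
while parser.hasNext():
    print(parser.next())
```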
#### File: Jihaoyun/gem5/fault_generator.py
```python
import os
import argparse
parser = argparse.ArgumentParser(description = 'gem5 with fault injection')
parser.add_argument('-log', '--log-file', type = str, dest = 'logFile',
help = 'The input file of debug info of Gem5 simulator')
parser.add_argument('-in', '--input-fault', type = str, dest = 'faultFile',
help = 'The input file of faults')
parser.add_argument('-out', '--output-fault', type = str, dest = 'newFaultFile',
help = 'The output file of faults')
args = parser.parse_args()
class FaultEntry:
def __init__(self, stuckBit, category, reg, bitPosition, tick):
self.stuckBit = stuckBit
self.category = category
self.reg = reg
self.bitPosition = bitPosition
self.tick = tick
class RegFaultGenerator:
def __init__(self, filename):
try:
self.file = open(filename, "r")
except IOError:
raise
def setFault(self, stuckBit, category, reg, bitPosition, tick):
self.fault = FaultEntry(stuckBit, category, reg, bitPosition, tick)
def haveNext(self):
self.nextLine = self.file.readline().strip()
if self.nextLine == "":
return False
return True
def next(self):
currentLine = self.nextLine.replace(" ","").split(":")
if currentLine[2] == "PseudoInst" and currentLine[4] == "rpns()":
if eval(currentLine[0]) < eval(self.fault.tick):
faultLine = ",".join([self.fault.stuckBit, self.fault.category, self.fault.reg,\
self.fault.bitPosition, currentLine[0], currentLine[0]])
return faultLine
return ""
class FaultParser:
def __init__(self, filename):
try:
self.file = open(filename, "r")
except IOError:
raise
def haveNext(self):
self.nextLine = self.file.readline().strip()
if self.nextLine == "":
return False
if self.nextLine[0] == '#':
return False
return True
def next(self):
currentLine = self.nextLine.replace(" ","")
entries = currentLine.split(",")
return FaultEntry(entries[0], entries[1], entries[2], entries[3], entries[4])
if __name__ == '__main__':
newFaultFP = open(args.newFaultFile, "w")
faultFP = FaultParser(args.faultFile)
lineLabel = 0
while faultFP.haveNext():
fault = faultFP.next()
logFP = RegFaultGenerator(args.logFile)
logFP.setFault(fault.stuckBit, fault.category, fault.reg, fault.bitPosition, fault.tick)
while logFP.haveNext():
newLine = logFP.next()
if not newLine == "":
newFaultFP.write("FAULT" + str(lineLabel) + ":" + newLine + "\n")
lineLabel = lineLabel + 1
newFaultFP.close()
```
#### File: Jihaoyun/gem5/stats.py
```python
import sys
from os import listdir
from os.path import isdir
import argparse
# GOLDEN instance reference name
GOLDEN = 'GOLDEN.txt'
# Command line arguments
parser = argparse.ArgumentParser(description='Gem5 Stats')
parser.add_argument('-d', '--dir', type=str, dest='directory', required=True,
help='The root directory of the stats files to be parsed')
parser.add_argument('-g', '--graphical', dest='graphicalStats',
action='store_true',
help='It is true if we want to display graphical stats')
parser.set_defaults(graphicalStats=False)
parser.add_argument('-s', '--stats', type=str, dest='stats', required=True,
nargs='+', help='The statistics we want to display')
parser.add_argument('-c', '--csv', dest='csvStats',
action='store_true',
help='If true a CSV file will be generated for the given statistics')
parser.set_defaults(csvStats=False)
args = parser.parse_args()
# This data structure contains key-value fields where
# key is equal to a file name and value is equa to
# its respective parsed Stat object
statInstances = {}
# Print some simple statistics
def printstat(stats, props):
for p in props:
print p, "values"
for inst in stats:
print inst, stats[inst].get(p, 'not found')
# Display a barchart displaying the given statistics
def showgraph(stats, props):
# Iterate over all stats
for p in props:
# Store labels for the horizontal axis
labels = []
# Store values fro the vertical axis
values = []
for inst in stats:
labels.append(inst)
values.append(stats[inst].get(p, 0))
# Put GOLDEN value at first position
idx = labels.index(GOLDEN)
labels[idx], labels[0] = labels[0], labels[idx]
values[idx], values[0] = values[0], values[idx]
# Plot labels and values
fig, ax = plt.subplots()
rects = ax.bar(np.arange(len(labels)), tuple(values),
width=0.3, color='r')
ax.set_ylabel(p)
ax.set_title('Variation of ' + p)
ax.set_xticklabels(tuple(labels))
ax.set_xticks(np.arange(len(labels)) + 0.3 / 2)
autolabel(rects, ax)
plt.show()
# Create a CSV file containing one row per instance and the requested stats
def createcsv(stats, props):
with open('stats.csv', 'w') as csvfile:
swriter = csv.writer(csvfile,
delimiter=",",quotechar="|",quoting=csv.QUOTE_MINIMAL)
# Iterate over all instances
for inst in stats:
row = []
row.append(inst) # Instance name should be the first entry
# Iterate all the requested stats
for p in props:
row.append(stats[inst].get(p, 0))
# Write the CSV entry
swriter.writerow(row)
# Attach a text label above each bar displaying its height
def autolabel(rects, ax):
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
# List all files in the directory
if __name__ == "__main__":
# Load statistics
# Star by listing all the files in the directory
for f in listdir(args.directory):
# If the file is a directory skip it
if isdir(f):
continue
# Otherwise start reading the file
with open("/".join([args.directory, f])) as statFile:
# Create the new statistic object
stat = {}
# Read all statistics
for line in statFile:
fields = line.split()
# if the line has less then 2 fields, skip it
if len(fields) < 2:
continue
# If the first character of the first field is '-'
# it means that the line is something written as
# an info message by Gem5
if len(fields[0]) > 0 and fields[0][0] == "-":
continue
# Otherwise get its key-value pair
# And store it into the Stat object
stat[fields[0]] = fields[1]
# Store the stat object
statInstances[f] = stat
# If we are in command line mode just display the requested statistics
if not args.graphicalStats:
printstat(statInstances, args.stats)
# Else show some graphics
else:
# Import the requested libraries
import numpy as np
import matplotlib.pyplot as plt
# Plot stats
showgraph(statInstances, args.stats)
# Create the CSV stat file if required
if args.csvStats:
# Import the requested library
import csv
# Generate CSV file
createcsv(statInstances, args.stats);
```
#### File: util/cpt_upgraders/etherswitch.py
```python
def upgrader(cpt):
for sec in cpt.sections():
if sec == "system":
options = cpt.items(sec)
for it in options:
opt_split = it[0].split('.')
new_sec_name = opt_split[1]
old_opt_name = opt_split[len(opt_split) - 1]
if "outputFifo" in new_sec_name:
new_sec_name = new_sec_name.rstrip("outputFifo")
new_sec_name += ".outputFifo"
new_sec_name = "system.system.%s" %(new_sec_name)
if not cpt.has_section(new_sec_name):
cpt.add_section(new_sec_name)
if old_opt_name == "size":
cpt.set(new_sec_name, "_size", it[1])
elif old_opt_name == "packets":
cpt.set(new_sec_name, "fifosize", it[1])
else:
cpt.set(new_sec_name, old_opt_name, it[1])
cpt.remove_option(sec, it[0])
``` |
{
"source": "JIHarrison/BundleTool",
"score": 2
} |
#### File: JIHarrison/BundleTool/misc_Dialog_ui.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_misc_Dialog(object):
def setupUi(self, misc_Dialog):
misc_Dialog.setObjectName("misc_Dialog")
misc_Dialog.resize(442, 500)
self.buttonBox = QtWidgets.QDialogButtonBox(misc_Dialog)
self.buttonBox.setGeometry(QtCore.QRect(210, 450, 221, 41))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setCenterButtons(False)
self.buttonBox.setObjectName("buttonBox")
self.gaskets_label = QtWidgets.QLabel(misc_Dialog)
self.gaskets_label.setGeometry(QtCore.QRect(30, 160, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.gaskets_label.setFont(font)
self.gaskets_label.setObjectName("gaskets_label")
self.shop_bundle_label = QtWidgets.QLabel(misc_Dialog)
self.shop_bundle_label.setGeometry(QtCore.QRect(30, 360, 91, 20))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.shop_bundle_label.setFont(font)
self.shop_bundle_label.setObjectName("shop_bundle_label")
self.hex_label = QtWidgets.QLabel(misc_Dialog)
self.hex_label.setGeometry(QtCore.QRect(30, 280, 51, 20))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.hex_label.setFont(font)
self.hex_label.setObjectName("hex_label")
self.studs_label = QtWidgets.QLabel(misc_Dialog)
self.studs_label.setGeometry(QtCore.QRect(30, 240, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.studs_label.setFont(font)
self.studs_label.setObjectName("studs_label")
self.gaskets2_label = QtWidgets.QLabel(misc_Dialog)
self.gaskets2_label.setGeometry(QtCore.QRect(30, 200, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.gaskets2_label.setFont(font)
self.gaskets2_label.setObjectName("gaskets2_label")
self.redraw_label = QtWidgets.QLabel(misc_Dialog)
self.redraw_label.setGeometry(QtCore.QRect(30, 320, 47, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.redraw_label.setFont(font)
self.redraw_label.setObjectName("redraw_label")
self.tubesheet_label = QtWidgets.QLabel(misc_Dialog)
self.tubesheet_label.setGeometry(QtCore.QRect(16, 40, 61, 20))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.tubesheet_label.setFont(font)
self.tubesheet_label.setObjectName("tubesheet_label")
self.baffles_label = QtWidgets.QLabel(misc_Dialog)
self.baffles_label.setGeometry(QtCore.QRect(30, 120, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.baffles_label.setFont(font)
self.baffles_label.setObjectName("baffles_label")
self.tubes_label = QtWidgets.QLabel(misc_Dialog)
self.tubes_label.setGeometry(QtCore.QRect(30, 80, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.tubes_label.setFont(font)
self.tubes_label.setObjectName("tubes_label")
self.part_number_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit.setGeometry(QtCore.QRect(130, 40, 113, 20))
self.part_number_lineEdit.setObjectName("part_number_lineEdit")
self.part_number_lineEdit_2 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_2.setGeometry(QtCore.QRect(130, 80, 113, 20))
self.part_number_lineEdit_2.setObjectName("part_number_lineEdit_2")
self.part_number_lineEdit_3 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_3.setGeometry(QtCore.QRect(130, 120, 113, 20))
self.part_number_lineEdit_3.setObjectName("part_number_lineEdit_3")
self.part_number_lineEdit_4 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_4.setGeometry(QtCore.QRect(130, 160, 113, 20))
self.part_number_lineEdit_4.setObjectName("part_number_lineEdit_4")
self.part_number_lineEdit_5 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_5.setGeometry(QtCore.QRect(130, 200, 113, 20))
self.part_number_lineEdit_5.setObjectName("part_number_lineEdit_5")
self.part_number_lineEdit_6 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_6.setGeometry(QtCore.QRect(130, 240, 113, 20))
self.part_number_lineEdit_6.setObjectName("part_number_lineEdit_6")
self.part_number_lineEdit_7 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_7.setGeometry(QtCore.QRect(130, 280, 113, 20))
self.part_number_lineEdit_7.setObjectName("part_number_lineEdit_7")
self.part_number_lineEdit_8 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_8.setGeometry(QtCore.QRect(130, 320, 113, 20))
self.part_number_lineEdit_8.setObjectName("part_number_lineEdit_8")
self.part_number_lineEdit_9 = QtWidgets.QLineEdit(misc_Dialog)
self.part_number_lineEdit_9.setGeometry(QtCore.QRect(130, 360, 113, 20))
self.part_number_lineEdit_9.setObjectName("part_number_lineEdit_9")
self.parts_number_label = QtWidgets.QLabel(misc_Dialog)
self.parts_number_label.setGeometry(QtCore.QRect(140, 10, 91, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.parts_number_label.setFont(font)
self.parts_number_label.setObjectName("parts_number_label")
self.spinBox = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox.setGeometry(QtCore.QRect(250, 40, 42, 22))
self.spinBox.setObjectName("spinBox")
self.spinBox_2 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_2.setGeometry(QtCore.QRect(250, 80, 42, 22))
self.spinBox_2.setObjectName("spinBox_2")
self.spinBox_3 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_3.setGeometry(QtCore.QRect(250, 120, 42, 22))
self.spinBox_3.setObjectName("spinBox_3")
self.spinBox_4 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_4.setGeometry(QtCore.QRect(250, 160, 42, 22))
self.spinBox_4.setObjectName("spinBox_4")
self.spinBox_5 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_5.setGeometry(QtCore.QRect(250, 200, 42, 22))
self.spinBox_5.setObjectName("spinBox_5")
self.spinBox_6 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_6.setGeometry(QtCore.QRect(250, 240, 42, 22))
self.spinBox_6.setObjectName("spinBox_6")
self.spinBox_7 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_7.setGeometry(QtCore.QRect(250, 280, 42, 22))
self.spinBox_7.setObjectName("spinBox_7")
self.spinBox_8 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_8.setGeometry(QtCore.QRect(250, 320, 42, 22))
self.spinBox_8.setObjectName("spinBox_8")
self.spinBox_9 = QtWidgets.QSpinBox(misc_Dialog)
self.spinBox_9.setGeometry(QtCore.QRect(250, 360, 42, 22))
self.spinBox_9.setObjectName("spinBox_9")
self.tubesheet_unit_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.tubesheet_unit_cost_lineEdit.setGeometry(QtCore.QRect(310, 40, 113, 20))
self.tubesheet_unit_cost_lineEdit.setObjectName("tubesheet_unit_cost_lineEdit")
self.tubes_unit_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.tubes_unit_cost_lineEdit.setGeometry(QtCore.QRect(310, 80, 113, 20))
self.tubes_unit_cost_lineEdit.setObjectName("tubes_unit_cost_lineEdit")
self.baffles_unit_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.baffles_unit_cost_lineEdit.setGeometry(QtCore.QRect(310, 120, 113, 20))
self.baffles_unit_cost_lineEdit.setObjectName("baffles_unit_cost_lineEdit")
self.gaskets_unit_cost_lineEdit_1 = QtWidgets.QLineEdit(misc_Dialog)
self.gaskets_unit_cost_lineEdit_1.setGeometry(QtCore.QRect(310, 160, 113, 20))
self.gaskets_unit_cost_lineEdit_1.setObjectName("gaskets_unit_cost_lineEdit_1")
self.gaskets_unit_cost_lineEdit_2 = QtWidgets.QLineEdit(misc_Dialog)
self.gaskets_unit_cost_lineEdit_2.setGeometry(QtCore.QRect(310, 200, 113, 20))
self.gaskets_unit_cost_lineEdit_2.setObjectName("gaskets_unit_cost_lineEdit_2")
self.studs_unit_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.studs_unit_cost_lineEdit.setGeometry(QtCore.QRect(310, 240, 113, 20))
self.studs_unit_cost_lineEdit.setObjectName("studs_unit_cost_lineEdit")
self.hex_nuts_unit_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.hex_nuts_unit_cost_lineEdit.setGeometry(QtCore.QRect(310, 280, 113, 20))
self.hex_nuts_unit_cost_lineEdit.setObjectName("hex_nuts_unit_cost_lineEdit")
self.redraw_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.redraw_cost_lineEdit.setGeometry(QtCore.QRect(310, 320, 113, 20))
self.redraw_cost_lineEdit.setObjectName("redraw_cost_lineEdit")
self.shop_hours_cost_lineEdit = QtWidgets.QLineEdit(misc_Dialog)
self.shop_hours_cost_lineEdit.setGeometry(QtCore.QRect(310, 360, 113, 20))
self.shop_hours_cost_lineEdit.setObjectName("shop_hours_cost_lineEdit")
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(misc_Dialog)
self.doubleSpinBox.setGeometry(QtCore.QRect(240, 410, 61, 22))
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.markup_label = QtWidgets.QLabel(misc_Dialog)
self.markup_label.setGeometry(QtCore.QRect(240, 390, 47, 13))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.markup_label.setFont(font)
self.markup_label.setObjectName("markup_label")
self.label = QtWidgets.QLabel(misc_Dialog)
self.label.setGeometry(QtCore.QRect(330, 410, 91, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label.setFont(font)
self.label.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.label.setText("")
self.label.setObjectName("label")
self.per_item_label = QtWidgets.QLabel(misc_Dialog)
self.per_item_label.setGeometry(QtCore.QRect(320, 10, 81, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.per_item_label.setFont(font)
self.per_item_label.setObjectName("per_item_label")
self.total_cost_label = QtWidgets.QLabel(misc_Dialog)
self.total_cost_label.setGeometry(QtCore.QRect(330, 390, 71, 16))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.total_cost_label.setFont(font)
self.total_cost_label.setObjectName("total_cost_label")
self.retranslateUi(misc_Dialog)
self.buttonBox.accepted.connect(misc_Dialog.accept)
self.buttonBox.rejected.connect(misc_Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(misc_Dialog)
misc_Dialog.setTabOrder(self.part_number_lineEdit, self.spinBox)
misc_Dialog.setTabOrder(self.spinBox, self.tubesheet_unit_cost_lineEdit)
misc_Dialog.setTabOrder(self.tubesheet_unit_cost_lineEdit, self.part_number_lineEdit_2)
misc_Dialog.setTabOrder(self.part_number_lineEdit_2, self.spinBox_2)
misc_Dialog.setTabOrder(self.spinBox_2, self.tubes_unit_cost_lineEdit)
misc_Dialog.setTabOrder(self.tubes_unit_cost_lineEdit, self.part_number_lineEdit_3)
misc_Dialog.setTabOrder(self.part_number_lineEdit_3, self.spinBox_3)
misc_Dialog.setTabOrder(self.spinBox_3, self.baffles_unit_cost_lineEdit)
misc_Dialog.setTabOrder(self.baffles_unit_cost_lineEdit, self.part_number_lineEdit_4)
misc_Dialog.setTabOrder(self.part_number_lineEdit_4, self.spinBox_4)
misc_Dialog.setTabOrder(self.spinBox_4, self.gaskets_unit_cost_lineEdit_1)
misc_Dialog.setTabOrder(self.gaskets_unit_cost_lineEdit_1, self.part_number_lineEdit_5)
misc_Dialog.setTabOrder(self.part_number_lineEdit_5, self.spinBox_5)
misc_Dialog.setTabOrder(self.spinBox_5, self.gaskets_unit_cost_lineEdit_2)
misc_Dialog.setTabOrder(self.gaskets_unit_cost_lineEdit_2, self.part_number_lineEdit_6)
misc_Dialog.setTabOrder(self.part_number_lineEdit_6, self.spinBox_6)
misc_Dialog.setTabOrder(self.spinBox_6, self.studs_unit_cost_lineEdit)
misc_Dialog.setTabOrder(self.studs_unit_cost_lineEdit, self.part_number_lineEdit_7)
misc_Dialog.setTabOrder(self.part_number_lineEdit_7, self.spinBox_7)
misc_Dialog.setTabOrder(self.spinBox_7, self.hex_nuts_unit_cost_lineEdit)
misc_Dialog.setTabOrder(self.hex_nuts_unit_cost_lineEdit, self.part_number_lineEdit_8)
misc_Dialog.setTabOrder(self.part_number_lineEdit_8, self.spinBox_8)
misc_Dialog.setTabOrder(self.spinBox_8, self.redraw_cost_lineEdit)
misc_Dialog.setTabOrder(self.redraw_cost_lineEdit, self.part_number_lineEdit_9)
misc_Dialog.setTabOrder(self.part_number_lineEdit_9, self.spinBox_9)
misc_Dialog.setTabOrder(self.spinBox_9, self.shop_hours_cost_lineEdit)
misc_Dialog.setTabOrder(self.shop_hours_cost_lineEdit, self.doubleSpinBox)
def retranslateUi(self, misc_Dialog):
_translate = QtCore.QCoreApplication.translate
misc_Dialog.setWindowTitle(_translate("misc_Dialog", "Dialog"))
self.gaskets_label.setText(_translate("misc_Dialog", "Gaskets"))
self.shop_bundle_label.setText(_translate("misc_Dialog", "Shop Hours"))
self.hex_label.setText(_translate("misc_Dialog", "Hex Nuts"))
self.studs_label.setText(_translate("misc_Dialog", "Studs"))
self.gaskets2_label.setText(_translate("misc_Dialog", "Gaskets"))
self.redraw_label.setText(_translate("misc_Dialog", "Redraw"))
self.tubesheet_label.setText(_translate("misc_Dialog", "Tubesheet"))
self.baffles_label.setText(_translate("misc_Dialog", "Baffles"))
self.tubes_label.setText(_translate("misc_Dialog", "Tubes"))
self.parts_number_label.setText(_translate("misc_Dialog", "Parts Numbers"))
self.markup_label.setText(_translate("misc_Dialog", "Mark Up"))
self.per_item_label.setText(_translate("misc_Dialog", "Cost Per Item"))
self.total_cost_label.setText(_translate("misc_Dialog", "Total Cost:"))
``` |
{
"source": "jihernrod/cabrera",
"score": 2
} |
#### File: cabrera/configuration/config.py
```python
CONF ={
"Stocks": ["T", "LOG.MC", "INTC", "BN.PA", "ENG.MC", "BBVA.MC", "NTGY.MC", "TEF.MC", "MAP.MC", "CSCO", "KO"],
"Fundamental_url": "https://www.alphavantage.co/query?function=OVERVIEW&symbol=%s&apikey=G762XMM5O6NTMLMI",
"IncomeStatement_url": "https://www.alphavantage.co/query?function=INCOME_STATEMENT&symbol=%s&apikey=G762XMM5O6NTMLMI",
"BalanceSheet_url": "https://www.alphavantage.co/query?function=BALANCE_SHEET&symbol=%s&apikey=G762XMM5O6NTMLMI",
"CashFlow_url": "https://www.alphavantage.co/query?function=CASH_FLOW&symbol=%s&apikey=G762XMM5O6NTMLMI",
"Earnings_url": "https://www.alphavantage.co/query?function=EARNINGS&symbol=%s&apikey=G762XMM5O6NTMLMI",
"Fundamental_file_name": "fundamental_%s.csv",
"IncomeStatement_file_name": "incomeStatement_%s.csv",
"BalanceSheet_file_name": "balanceSheet_%s.csv",
"CashFlow_file_name": "cashFlow_%s.csv",
"Earnings_file_name": "earnings_%s.csv",
"Download_folder": "D:\\tmp",
"Download_Delay": 15
}
def get(key, default_value = None):
return CONF.get(key, default_value)
```
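A quick sketch of reading values through the helper above (key names come from the CONF dict; the fallback string is arbitrary):
```python
import configuration.config as cfg

print(cfg.get("Stocks"))                  # list of tickers
print(cfg.get("Download_Delay"))          # 15
print(cfg.get("MissingKey", "fallback"))  # default returned for unknown keys
```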
#### File: cabrera/market_downloader/fundamentals.py
```python
import requests
import pandas as pd
import time
import os
import datetime
import configuration.config as default_configuration
stock_list = default_configuration.get("Stocks")
now = datetime.datetime.now()
day_str = now.strftime("%Y%m%d")
class NotReturnData(Exception):
pass
def download_fundamental_data(fundatmental_url, fundamental_file_name, output_adapter_functor = None):
list_stocks = []
for stock in stock_list:
print("Getting info [%s] of [%s]" % (fundatmental_url, stock))
response = requests.get(
fundatmental_url % stock)
if output_adapter_functor:
try:
list_stocks.extend(output_adapter_functor(response.json()))
except NotReturnData as nrd:
print ("Err: "+ str(nrd))
else:
list_stocks.append(response.json())
time.sleep(default_configuration.get("Download_Delay"))
dataframe = pd.DataFrame(list_stocks)
file_name = fundamental_file_name % (day_str)
dataframe.to_csv(
os.path.join(default_configuration.get("Download_folder"), file_name))
print("Export info [%s]" % file_name)
def adapter_annual_reports(x):
if "annualReports" not in x:
raise NotReturnData("Data is not completed")
for report in x["annualReports"]:
report["symbol"] =x["symbol"]
return x["annualReports"]
def adapter_annual_earnings(x):
if "annualEarnings" not in x:
raise NotReturnData("Data is not completed")
for report in x["annualEarnings"]:
report["symbol"] = x["symbol"]
return x["annualEarnings"]
if __name__ == "__main__":
urls = ["IncomeStatement_url",
"BalanceSheet_url",
"CashFlow_url"]
files_to_export = [
"IncomeStatement_file_name",
"BalanceSheet_file_name",
"CashFlow_file_name"]
for url, file in zip(urls, files_to_export):
download_fundamental_data(default_configuration.get(url),
default_configuration.get(file), output_adapter_functor=lambda x: adapter_annual_reports(x))
download_fundamental_data(default_configuration.get("Earnings_url"),
default_configuration.get("Earnings_file_name"), output_adapter_functor=lambda x: adapter_annual_earnings(x))
download_fundamental_data(default_configuration.get("Fundamental_url"),
default_configuration.get("Fundamental_file_name"))
```
#### File: cabrera/test/Actions.py
```python
import unittest
import actions.getDashBoard as getDashBoardAction
import adapters.getDashBoardMatrixAdapter as getDashBoardMatrixAdapter
class TestActionsClass(unittest.TestCase):
def test_GetDashBoardAction(self):
getDashBoardMatrixAdapter.GetDashBoardActionMatrixAdapter().adapt(getDashBoardAction.GetDashBoardAction().do())
if __name__ == '__main__':
unittest.main()
```
#### File: cabrera/yinterface/yInterface.py
```python
import yfinance as yf
import pprint
import os
import pandas
def get_ticker_info(list_tickers = [], period = "5y"):
ticker_info = {}
for ticker in list_tickers:
msft = yf.Ticker(ticker)
ticker_info[ticker]={}
# get stock info
ticker_info[ticker]["stock_info"] = msft.info
# get historical market data
hist = msft.history(period=period)
ticker_info[ticker]["hist_market_data"] = hist
# show actions (dividends, splits)
ticker_info[ticker]["actions"] = msft.actions
# show dividends
ticker_info[ticker]["dividends"] = msft.dividends
# show splits
ticker_info[ticker]["splits"] = msft.splits
# show financials
ticker_info[ticker]["financials"] = msft.financials
ticker_info[ticker]["quarterly_financials"] = msft.quarterly_financials
# show major holders
ticker_info[ticker]["major_holders"] = msft.major_holders
# show institutional holders
ticker_info[ticker]["institutional_holders"] = msft.institutional_holders
# show balance sheet
ticker_info[ticker]["balance_sheet"] = msft.balance_sheet
ticker_info[ticker]["quarterly_balance_sheet"] = msft.quarterly_balance_sheet
# show cashflow
ticker_info[ticker]["cashflow"] = msft.cashflow
ticker_info[ticker]["quarterly_cashflow"] = msft.quarterly_cashflow
# show earnings
ticker_info[ticker]["earnings"] = msft.earnings
ticker_info[ticker]["quarterly_earnings"] = msft.quarterly_earnings
# show sustainability
ticker_info[ticker]["sustainability"] = msft.sustainability
# show analysts recommendations
ticker_info[ticker]["recommendations"] = msft.recommendations
# show next event (earnings, etc)
ticker_info[ticker]["calendar"] = msft.calendar
# show ISIN code - *experimental*
# ISIN = International Securities Identification Number
ticker_info[ticker]["isin"] = msft.isin
return ticker_info
def dump_yfinanze(list_tickers, path):
for ticker, value in list_tickers.items():
new_dict = {}
for key, factor in value.items():
if isinstance(factor, pandas.DataFrame) :
new_dict[key] = factor.to_dict('records')
elif isinstance(factor, pandas.Series):
new_dict[key] = factor.to_dict()
else:
new_dict[key]= factor
with open(os.path.join(path, ticker+".pprint"), 'w') as f:
pprint.pprint(new_dict, f)
if __name__=="__main__":
dump_yfinanze(get_ticker_info(['MAP.MC']), "d:\\tmp\\dumps")
``` |
{
"source": "jiherrero4/spark",
"score": 3
} |
#### File: jiherrero4/spark/creaSala.py
```python
import json
import os
import requests
import sys
def createRoom():
    # Define header used for authentication
    myToken="<KEY>"
    roomTitle="PruebaCreacionSala"
    headers = { "Authorization": "Bearer "+myToken, "Content-type": "application/json" }
    # Define the action to be taken in the HTTP request
    roomInfo = { "title": roomTitle }
    # Execute HTTP POST request to create the Spark Room
    r = requests.post("https://api.ciscospark.com/v1/rooms",headers=headers, json=roomInfo)
    room = r.json()
    # Print the result of the HTTP POST request
    print(room)

if __name__ == "__main__":
    createRoom()
``` |
{
"source": "jihjihk/torchtech",
"score": 3
} |
#### File: torchtech/source/test2.py
```python
from flask import Flask, request, redirect
import urllib
import json
import codecs
import sqlite3
import geopy
import requests
from geopy.distance import vincenty
from geopy.geocoders import Nominatim
import xml.etree.ElementTree as ET
import os
from twilio.rest import Client
from twilio.twiml.messaging_response import MessagingResponse
def initialize():
url = "https://api.myjson.com/bins/hkjsr"
data = urllib.urlopen(url).read()
trees = json.loads(data)
conn = sqlite3.connect('hack1.sqlite')
cur = conn.cursor()
cur.executescript('''
DROP TABLE IF EXISTS People;
CREATE TABLE People (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE,
num TEXT UNIQUE,
location TEXT UNIQUE
)''')
for tree in trees:
name = tree['name']
num = tree['num']
location = tree['location']
cur.execute('''INSERT OR IGNORE INTO People (name,num,location)
VALUES ( ? , ? , ? )''', ( name,num,location ) )
conn.commit()
return
def twil(num,user_add):
account_sid = 'ACed10e34bac3a56da30b364c7eb639799'
auth_token = '<PASSWORD>'
client = Client(account_sid,auth_token)
client.messages.create(
to = num,
from_ = "+13475149453",
body = "SOS. Help needed at location : %s"%(user_add)
)
return 1 #see if person said yes or no later
def smallest(back):
freegeoip = "http://freegeoip.net/json"
geo_r = requests.get(freegeoip)
geo_json = json.loads(geo_r.text)
user_position = [geo_json["latitude"], geo_json["longitude"]]
lat_lon_sos = (user_position[0], user_position[1])
user_add = Nominatim().reverse(lat_lon_sos)
conn = sqlite3.connect('hack1.sqlite')
cursor = conn.cursor()
table = cursor.execute("SELECT * FROM People;")
nearest = None
curr = None
thisname = None
thisnum = None
rows = 0
for row in table:
rows += 1
    # iterate over the actual row ids instead of re-reading max(id) every pass
    cursor.execute('''SELECT id FROM People''')
    for i in [r[0] for r in cursor.fetchall()]:
cursor.execute('''SELECT name FROM People WHERE id = (?)''',(i,))
name = cursor.fetchone()[0]
cursor.execute('''SELECT num FROM People WHERE id = (?)''',(i,))
num = cursor.fetchone()[0]
cursor.execute('''SELECT location FROM People WHERE id = (?)''',(i,))
location = cursor.fetchone()[0]
loc = Nominatim().geocode(location)
row_loc = (loc.latitude, loc.longitude)
dist = vincenty(lat_lon_sos, row_loc).miles
if nearest == None or dist < nearest:
nearest = dist
thisname = name
thisnum = num
place = curr
back.append([name,num,location])
cursor.execute(''' DELETE FROM People WHERE name = ( ? )''', (thisname, ))
conn.commit()
return twil(thisnum,user_add)
def sending():
back = list()
first = smallest(back)
while True:
if first != 1:
            first = smallest(back)
else:
break
conny = sqlite3.connect('hack1.sqlite')
curry = conny.cursor()
for item in back:
        curry.execute('''INSERT OR IGNORE INTO People (name,num,location)
            VALUES ( ? , ? , ? )''', (item[0],item[1],item[2] ) )
    conny.commit()
def main():
initialize()
sending()
``` |
{
"source": "jiho2007/mfrac",
"score": 2
} |
#### File: mfrac/mfrac/error.py
```python
class FractionError(Exception):
def __init__(self, msg):
super().__init__(msg)
```
#### File: mfrac/mfrac/frac.py
```python
try:
from .error import FractionError
except:
FractionError = TypeError
class frac:
def __init__(self, n, m=1):
try:
float(n)+float(m) #check
except:
raise FractionError('Invalid Argument Type')
        if type(n) == frac:
            n, m = n.n, n.m  # copy numerator and denominator from another frac
        self.n = n #numerator
        self.m = m #denominator
        if type(n) == str:
            self.n = float(n)
        if type(m) == str:
            self.m = float(m)
        if type(self.n) != int and self.n % 1 == 0: #drop to int when the value is whole
            self.n = int(self.n)
        if type(self.m) != int and self.m % 1 == 0:
            self.m = int(self.m)
    def __repr__(self): #string representation
return 'frac({}, {})'.format(self.n, self.m)
def __str__(self):
return '{}/{}'.format(self.n, self.m)
def __format__(self, s):
return self.__str__()
def __reversed__(self):
return frac(self.m, self.n)
    def __add__(self, f): #addition
if type(f) == frac:
return frac(self.n*f.m + self.m*f.n, self.m * f.m).reduc()
return frac(self.n + f*self.m, self.m).reduc()
    def __sub__(self, f): #subtraction (returns the absolute difference)
if type(f) == frac:
if self.n * f.m > self.m * f.n:
rn = self.n*f.m - self.m*f.n
elif self.n * f.m < self.m * f.n:
rn = self.m*f.n - self.n*f.m
else:
return 0
return frac(rn, self.m*f.m).reduc()
return self.__sub__(frac(f))
    def __mul__(self, f): #multiplication
if type(f) == frac:
return frac(self.n*f.n, self.m) / f.m
return frac(self.n*f, self.m).reduc()
    def __truediv__(self, f): #division
if type(f) == frac:
return self.__mul__(frac(f.m, f.n))
return frac(self.n, self.m*f).reduc()
    def __radd__(self, f): #reflected addition
return self.__add__(f)
    def __rsub__(self, f): #reflected subtraction
if type(f) == frac:
return f.__sub__(self)
return self.__sub__(frac(self.m*f, self.m))
    def __rmul__(self, f): #reflected multiplication
        return self.__mul__(f)
    def __rtruediv__(self, f): #reflected division
if type(f) == frac:
return f.__truediv__(self)
return frac(self.m, self.n).__mul__(f)
    def __eq__(self, f): #equality comparison
if type(f) == frac:
a = self.common(f)
b = f.common(self)
return a.n == b.n and a.m == b.m
return self.__eq__(frac(f))
    def __ne__(self, f): #inequality comparison
return not self.__eq__(f)
    def __lt__(self, f): #less-than comparison
if type(f) == frac:
a = self.common(f)
b = f.common(self)
return a.n < b.n and a.m == b.m
return self.__lt__(frac(f))
    def __le__(self, f): #less-than-or-equal comparison
if type(f) == frac:
a = self.common(f)
b = f.common(self)
return a.n <= b.n and a.m == b.m
return self.__le__(frac(f))
    def __gt__(self, f): #greater-than comparison
        if type(f) == frac:
            a = self.common(f)
            b = f.common(self)
            return a.n > b.n and a.m == b.m
        return self.__gt__(frac(f))
    def __ge__(self, f): #greater-than-or-equal comparison
        if type(f) == frac:
            a = self.common(f)
            b = f.common(self)
            return a.n >= b.n and a.m == b.m
        return self.__ge__(frac(f))
    def __float__(self): #float value
return float(self.n) / float(self.m)
    def __int__(self): #truncated integer value
        return int(self.__float__())
    def __mod__(self, f): #set n and m from another frac or an iterable pair
        if type(f) == frac:
            self.__init__(f)
            return self
        a = list(f) #check that f is iterable
        self.n, self.m = f
        return self
    def reduc(self): #reduce to lowest terms
if self.m<self.n:
c=self.m
elif self.n<self.m:
c=self.n
else:
return frac(1)
for i in range(c, 0, -1):
if self.n%i == 0 and self.m%i == 0:
return frac(self.n/i, self.m/i)
return frac(self.n, self.m)
    def common(self, f): #scale to a common denominator
if type(f) == frac:
return frac(self.n*f.m, self.m*f.m)
return frac(self.n*f, self.m*f)
``` |
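A short usage sketch of the class above; the import path is assumed from the package layout:
```python
from mfrac.frac import frac

a = frac(1, 2)
b = frac(1, 3)
print(a + b)         # 5/6
print(a * b)         # 1/6
print(a > b)         # True
print(float(a - b))  # 0.1666..., since subtraction returns the absolute difference
```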