import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import MinMaxScaler
import random
MAXLIFE = 120
SCALE = 1
RESCALE = 1
true_rul = []
test_engine_id = 0
training_engine_id = 0
def kink_RUL(cycle_list, max_cycle):
'''
Piecewise linear function with zero gradient and unit gradient
^
|
MAXLIFE |-----------
| \
| \
| \
| \
| \
|----------------------->
'''
knee_point = max_cycle - MAXLIFE
kink_RUL = []
stable_life = MAXLIFE
for i in range(0, len(cycle_list)):
if i < knee_point:
kink_RUL.append(MAXLIFE)
else:
tmp = kink_RUL[i - 1] - (stable_life / (max_cycle - knee_point))
kink_RUL.append(tmp)
return kink_RUL
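# Illustrative sketch (not part of the original file; the _example_* names are made up):
# for a hypothetical engine that fails at cycle 150 with MAXLIFE = 120, the knee point
# is cycle 30, so the target stays flat at 120 for the first 30 entries and then decays
# linearly to 0 at the failure cycle.
_example_cycles = list(range(1, 151))
_example_rul = kink_RUL(_example_cycles, max_cycle=150)
assert _example_rul[0] == MAXLIFE and abs(_example_rul[-1]) < 1e-9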
def compute_rul_of_one_id(FD00X_of_one_id, max_cycle_rul=None):
'''
Takes the rows of a single engine_id from a sub-dataset (e.g. train_FD001) and
returns the corresponding RUL (remaining useful life) values for those rows as a list.
'''
cycle_list = FD00X_of_one_id['cycle'].tolist()
if max_cycle_rul is None:
max_cycle = max(cycle_list) # Failure cycle
else:
max_cycle = max(cycle_list) + max_cycle_rul
# print(max(cycle_list), max_cycle_rul)
# return kink_RUL(cycle_list,max_cycle)
return kink_RUL(cycle_list, max_cycle)
def compute_rul_of_one_file(FD00X, id='engine_id', RUL_FD00X=None):
'''
Takes a full sub-dataset (e.g. train_FD001) and returns the RUL values of all engines as one list.
'''
rul = []
# Loop over each id value of the 'engine_id' column
if RUL_FD00X is None:
for _id in set(FD00X[id]):
rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id]))
return rul
else:
rul = []
for _id in set(FD00X[id]):
# print("#### id ####", int(RUL_FD00X.iloc[_id - 1]))
true_rul.append(int(RUL_FD00X.iloc[_id - 1]))
rul.extend(compute_rul_of_one_id(FD00X[FD00X[id] == _id], int(RUL_FD00X.iloc[_id - 1])))
return rul
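# Minimal usage sketch with toy data (assumed, not part of the original study data):
# two engines with 130 and 125 recorded cycles each get their own kink-shaped RUL
# series; with no RUL file given, each engine is assumed to fail at its last cycle.
_toy = pd.DataFrame({'engine_id': [1] * 130 + [2] * 125,
'cycle': list(range(1, 131)) + list(range(1, 126))})
_toy_rul = compute_rul_of_one_file(_toy)
assert len(_toy_rul) == len(_toy) and _toy_rul[0] == MAXLIFE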
def get_CMAPSSData(save=False, save_training_data=True, save_testing_data=True, files=[1, 2, 3, 4, 5],
min_max_norm=False):
'''
:param save: switch to load the already preprocessed data or begin preprocessing of raw data
:param save_training_data: same functionality as 'save' but for training data only
:param save_testing_data: same functionality as 'save' but for testing data only
:param files: indicates which sub-datasets need to be loaded for processing
:param min_max_norm: switch to enable min-max normalization
:return: function will save the preprocessed training and testing data as numpy objects
'''
if save == False:
return np.load("normalized_train_data.npy"), np.load("normalized_test_data.npy"), pd.read_csv(
'normalized_train_data.csv', index_col=[0]), pd.read_csv('normalized_test_data.csv', index_col=[0])
column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
if save_training_data: ### Training ###
train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
train_FD002 = pd.read_table("./CMAPSSData/train_FD002.txt", header=None, delim_whitespace=True)
train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
train_FD004 = pd.read_table("./CMAPSSData/train_FD004.txt", header=None, delim_whitespace=True)
train_FD001.columns = column_name
train_FD002.columns = column_name
train_FD003.columns = column_name
train_FD004.columns = column_name
previous_len = 0
frames = []
for data_file in ['train_FD00' + str(i) for i in files]: # load subdataset by subdataset
#### standard normalization ####
mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
std.replace(0, 1, inplace=True)
# print("std", std)
################################
if min_max_norm:
scaler = MinMaxScaler()
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
else:
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
list(eval(data_file)))] - mean) / std
eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file))
current_len = len(eval(data_file))
# print(eval(data_file).index)
eval(data_file).index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
# print(eval(data_file).index)
frames.append(eval(data_file))
print(data_file)
train = pd.concat(frames)
global training_engine_id
training_engine_id = train['engine_id']
train = train.drop('engine_id', 1)
train = train.drop('cycle', 1)
# if files[0] == 1 or files[0] == 3:
# train = train.drop('setting3', 1)
# train = train.drop('s18', 1)
# train = train.drop('s19', 1)
train_values = train.values * SCALE
np.save('normalized_train_data.npy', train_values)
train.to_csv('normalized_train_data.csv')
###########
else:
train = pd.read_csv('normalized_train_data.csv', index_col=[0])
train_values = train.values
if save_testing_data: ### testing ###
test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
test_FD002 = pd.read_table("./CMAPSSData/test_FD002.txt", header=None, delim_whitespace=True)
test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
test_FD004 = pd.read_table("./CMAPSSData/test_FD004.txt", header=None, delim_whitespace=True)
test_FD001.columns = column_name
test_FD002.columns = column_name
test_FD003.columns = column_name
test_FD004.columns = column_name
# load RUL data
RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
RUL_FD002 = pd.read_table("./CMAPSSData/RUL_FD002.txt", header=None, delim_whitespace=True)
RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
RUL_FD004 = pd.read_table("./CMAPSSData/RUL_FD004.txt", header=None, delim_whitespace=True)
RUL_FD001.columns = ['RUL']
RUL_FD002.columns = ['RUL']
RUL_FD003.columns = ['RUL']
RUL_FD004.columns = ['RUL']
previous_len = 0
frames = []
for (data_file, rul_file) in [('test_FD00' + str(i), 'RUL_FD00' + str(i)) for i in files]:
mean = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].mean()
std = eval(data_file).iloc[:, 2:len(list(eval(data_file)))].std()
std.replace(0, 1, inplace=True)
if min_max_norm:
scaler = MinMaxScaler()
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = scaler.fit_transform(
eval(data_file).iloc[:, 2:len(list(eval(data_file)))])
else:
eval(data_file).iloc[:, 2:len(list(eval(data_file)))] = (eval(data_file).iloc[:, 2:len(
list(eval(data_file)))] - mean) / std
eval(data_file)['RUL'] = compute_rul_of_one_file(eval(data_file), RUL_FD00X=eval(rul_file))
current_len = len(eval(data_file))
eval(data_file).index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
frames.append(eval(data_file))
print(data_file)
if len(files) == 1:
global test_engine_id
test_engine_id = eval(data_file)['engine_id']
test = pd.concat(frames)
test = test.drop('engine_id', 1)
test = test.drop('cycle', 1)
# if files[0] == 1 or files[0] == 3:
# test = test.drop('setting3', 1)
# test = test.drop('s18', 1)
# test = test.drop('s19', 1)
test_values = test.values * SCALE
np.save('normalized_test_data.npy', test_values)
test.to_csv('normalized_test_data.csv')
###########
else:
test = pd.read_csv('normalized_test_data.csv', index_col=[0])
test_values = test.values
return train_values, test_values, train, test
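# Usage sketch (assumes the ./CMAPSSData/*.txt files are available locally): the first
# call preprocesses and caches sub-dataset FD001 with z-score normalization; later
# calls with save=False simply reload the cached .npy/.csv files.
# train_values, test_values, train_df, test_df = get_CMAPSSData(save=True, files=[1])
# train_values, test_values, train_df, test_df = get_CMAPSSData(save=False)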
def get_PHM08Data(save=False):
"""
Function is to load PHM 2008 challenge dataset
"""
if save == False:
return np.load("./PHM08/processed_data/phm_training_data.npy"), np.load("./PHM08/processed_data/phm_testing_data.npy"), np.load(
"./PHM08/processed_data/phm_original_testing_data.npy")
column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
phm_training_data = pd.read_table("./PHM08/train.txt", header=None, delim_whitespace=True)
phm_training_data.columns = column_name
phm_testing_data = pd.read_table("./PHM08/final_test.txt", header=None, delim_whitespace=True)
phm_testing_data.columns = column_name
print("phm training")
mean = phm_training_data.iloc[:, 2:len(list(phm_training_data))].mean()
std = phm_training_data.iloc[:, 2:len(list(phm_training_data))].std()
phm_training_data.iloc[:, 2:len(list(phm_training_data))] = (phm_training_data.iloc[:, 2:len(
list(phm_training_data))] - mean) / std
phm_training_data['RUL'] = compute_rul_of_one_file(phm_training_data)
print("phm testing")
mean = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].mean()
std = phm_testing_data.iloc[:, 2:len(list(phm_testing_data))].std()
phm_testing_data.iloc[:, 2:len(list(phm_testing_data))] = (phm_testing_data.iloc[:, 2:len(
list(phm_testing_data))] - mean) / std
phm_testing_data['RUL'] = 0
#phm_testing_data['RUL'] = compute_rul_of_one_file(phm_testing_data)
train_engine_id = phm_training_data['engine_id']
# print(phm_training_engine_id[phm_training_engine_id==1].index)
phm_training_data = phm_training_data.drop('engine_id', 1)
phm_training_data = phm_training_data.drop('cycle', 1)
global test_engine_id
test_engine_id = phm_testing_data['engine_id']
phm_testing_data = phm_testing_data.drop('engine_id', 1)
phm_testing_data = phm_testing_data.drop('cycle', 1)
phm_training_data = phm_training_data.values
phm_testing_data = phm_testing_data.values
engine_ids = train_engine_id.unique()
train_test_split = np.random.rand(len(engine_ids)) < 0.80
train_engine_ids = engine_ids[train_test_split]
test_engine_ids = engine_ids[~train_test_split]
# test_engine_id = pd.Series(test_engine_ids)
training_data = phm_training_data[train_engine_id[train_engine_id == train_engine_ids[0]].index]
for id in train_engine_ids[1:]:
tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
training_data = np.concatenate((training_data, tmp))
# print(training_data.shape)
testing_data = phm_training_data[train_engine_id[train_engine_id == test_engine_ids[0]].index]
for id in test_engine_ids[1:]:
tmp = phm_training_data[train_engine_id[train_engine_id == id].index]
testing_data = np.concatenate((testing_data, tmp))
# print(testing_data.shape)
print(phm_training_data.shape, phm_testing_data.shape, training_data.shape, testing_data.shape)
np.save("./PHM08/processed_data/phm_training_data.npy", training_data)
np.savetxt("./PHM08/processed_data/phm_training_data.txt", training_data, delimiter=" ")
np.save("./PHM08/processed_data/phm_testing_data.npy", testing_data)
np.savetxt("./PHM08/processed_data/phm_testing_data.txt", testing_data, delimiter=" ")
np.save("./PHM08/processed_data/phm_original_testing_data.npy", phm_testing_data)
np.savetxt("./PHM08/processed_data/phm_original_testing_data.csv", phm_testing_data, delimiter=",")
return training_data, testing_data, phm_testing_data
def data_augmentation(files=1, low=[10, 40, 90, 170], high=[35, 85, 160, 250], plot=False, combine=False):
'''
This helper function augments the training data to look like the testing data.
Training trajectories always run to failure, but most testing trajectories stop before a failure.
Therefore, the training data is augmented with scenarios that end before failure.
:param files: selects which CMAPSS sub-dataset to use
:param low: lower bound for the random selection of the engine cycle
:param high: upper bound for the random selection of the engine cycle
:param plot: switch to plot the augmented data
:return:
'''
DEBUG = False
column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
### Loading original data ###
if files == "phm":
train_FD00x = pd.read_table("./PHM08/processed_data/phm_training_data.txt", header=None, delim_whitespace=True)
train_FD00x.drop(train_FD00x.columns[len(train_FD00x.columns) - 1], axis=1, inplace=True)
train_FD00x.columns = column_name
else:
if combine:
train_FD00x,_,_ = combine_FD001_and_FD003()
else:
file_path = "./CMAPSSData/train_FD00" + str(files) + ".txt"
train_FD00x = pd.read_table(file_path, header=None, delim_whitespace=True)
train_FD00x.columns = column_name
print(file_path.split("/")[-1])
### Standard normalization ###
mean = train_FD00x.iloc[:, 2:len(list(train_FD00x))].mean()
std = train_FD00x.iloc[:, 2:len(list(train_FD00x))].std()
std.replace(0, 1, inplace=True)
train_FD00x.iloc[:, 2:len(list(train_FD00x))] = (train_FD00x.iloc[:, 2:len(list(train_FD00x))] - mean) / std
final_train_FD = train_FD00x.copy()
previous_len = 0
frames = []
for i in range(len(high)):
train_FD = train_FD00x.copy()
train_engine_id = train_FD['engine_id']
engine_ids = train_engine_id.unique()
total_ids = len(engine_ids)
train_rul = []
print("*************", final_train_FD.shape, total_ids, low[i], high[i], "*****************")
for id in range(1, total_ids + 1):
train_engine_id = train_FD['engine_id']
indexes = train_engine_id[train_engine_id == id].index ### filter indexes related to id
traj_data = train_FD.loc[indexes] ### filter trajectory data
cutoff_cycle = random.randint(low[i], high[i]) ### randomly selecting the cutoff point of the engine cycle
if cutoff_cycle > max(traj_data['cycle']):
cutoff_cycle = max(traj_data['cycle'])
train_rul.append(max(traj_data['cycle']) - cutoff_cycle) ### collecting remaining cycles
cutoff_cycle_index = traj_data['cycle'][traj_data['cycle'] == cutoff_cycle].index ### cutoff cycle index
if DEBUG:
print("traj_shape: ", traj_data.shape, "current_engine_id:", id, "cutoff_cycle:", cutoff_cycle,
"cutoff_index", cutoff_cycle_index, "engine_fist_index", indexes[0], "engine_last_index",
indexes[-1])
### removing rows after cutoff cycle index ###
if cutoff_cycle_index[0] != indexes[-1]:
drop_range = list(range(cutoff_cycle_index[0] + 1, indexes[-1] + 1))
train_FD.drop(train_FD.index[drop_range], inplace=True)
train_FD.reset_index(drop=True, inplace=True)
### calculating the RUL for augmented data
train_rul = pd.DataFrame.from_dict({'RUL': train_rul})
train_FD['RUL'] = compute_rul_of_one_file(train_FD, RUL_FD00X=train_rul)
### changing the engine_id for augmented data
train_engine_id = train_FD['engine_id']
for id in range(1, total_ids + 1):
indexes = train_engine_id[train_engine_id == id].index
train_FD.loc[indexes, 'engine_id'] = id + total_ids * (i + 1)
if i == 0: # should only execute at the first iteration
final_train_FD['RUL'] = compute_rul_of_one_file(final_train_FD)
current_len = len(final_train_FD)
final_train_FD.index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
### Re-indexing the augmented data
train_FD['RUL'].index = range(previous_len, previous_len + len(train_FD))
previous_len = previous_len + len(train_FD)
final_train_FD = pd.concat(
[final_train_FD, train_FD]) # concatenate the newly augmented data with previous data
frames.append(final_train_FD)
train = pd.concat(frames)
train.reset_index(drop=True, inplace=True)
train_engine_id = train['engine_id']
# print(train_engine_id)
engine_ids = train_engine_id.unique()
# print(engine_ids[1:])
np.random.shuffle(engine_ids)
# print(engine_ids)
training_data = train.loc[train_engine_id[train_engine_id == engine_ids[0]].index]
training_data.reset_index(drop=True, inplace=True)
previous_len = len(training_data)
for id in engine_ids[1:]:
traj_data = train.loc[train_engine_id[train_engine_id == id].index]
current_len = len(traj_data)
traj_data.index = range(previous_len, previous_len + current_len)
previous_len = previous_len + current_len
training_data = pd.concat([training_data, traj_data])
global training_engine_id
training_engine_id = training_data['engine_id']
training_data = training_data.drop('engine_id', 1)
training_data = training_data.drop('cycle', 1)
# if files == 1 or files == 3:
# training_data = training_data.drop('setting3', 1)
# training_data = training_data.drop('s18', 1)
# training_data = training_data.drop('s19', 1)
training_data_values = training_data.values * SCALE
np.save('normalized_train_data.npy', training_data_values)
training_data.to_csv('normalized_train_data.csv')
train = training_data_values
x_train = train[:, :train.shape[1] - 1]
y_train = train[:, train.shape[1] - 1] * RESCALE
print("training in augmentation", x_train.shape, y_train.shape)
if plot:
plt.plot(y_train, label="train")
plt.figure()
plt.plot(x_train)
plt.title("train")
# plt.figure()
# plt.plot(y_train)
# plt.title("test")
plt.show()
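# Usage sketch (assumes ./CMAPSSData/train_FD001.txt is present): augment FD001 four
# times, cutting each engine trajectory off at a random cycle drawn from the matching
# [low[i], high[i]] interval, then shuffle by engine and overwrite
# normalized_train_data.npy / normalized_train_data.csv with the augmented set.
# data_augmentation(files=1, low=[10, 40, 90, 170], high=[35, 85, 160, 250], plot=False)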
def analyse_Data(dataset, files=None, plot=True, min_max=False):
'''
Generate pre-processed data according to the given dataset
:param dataset: choose between "phm" for PHM 2008 dataset or "cmapss" for CMAPSS data set with file number
:param files: Only for CMAPSS dataset to select sub dataset
:param min_max: switch to allow min-max normalization
:return:
'''
if dataset == "phm":
training_data, testing_data, phm_testing_data = get_PHM08Data(save=True)
x_phmtrain = training_data[:, :training_data.shape[1] - 1]
y_phmtrain = training_data[:, training_data.shape[1] - 1]
x_phmtest = testing_data[:, :testing_data.shape[1] - 1]
y_phmtest = testing_data[:, testing_data.shape[1] - 1]
print("phmtrain", x_phmtrain.shape, y_phmtrain.shape)
print("phmtest", x_phmtrain.shape, y_phmtrain.shape)
print("phmtest", phm_testing_data.shape)
if plot:
# plt.plot(x_phmtrain, label="phmtrain_x")
plt.figure()
plt.plot(y_phmtrain, label="phmtrain_y")
# plt.figure()
# plt.plot(x_phmtest, label="phmtest_x")
plt.figure()
plt.plot(y_phmtest, label="phmtest_y")
# plt.figure()
# plt.plot(phm_testing_data, label="test")
plt.show()
elif dataset == "cmapss":
training_data, testing_data, training_pd, testing_pd = get_CMAPSSData(save=True, files=files,
min_max_norm=min_max)
x_train = training_data[:, :training_data.shape[1] - 1]
y_train = training_data[:, training_data.shape[1] - 1]
print("training", x_train.shape, y_train.shape)
x_test = testing_data[:, :testing_data.shape[1] - 1]
y_test = testing_data[:, testing_data.shape[1] - 1]
print("testing", x_test.shape, y_test.shape)
if plot:
plt.plot(y_train, label="train")
plt.figure()
plt.plot(y_test, label="test")
plt.figure()
plt.plot(x_train)
plt.title("train: FD00" + str(files[0]))
plt.figure()
plt.plot(y_train)
plt.title("train: FD00" + str(files[0]))
plt.show()
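# Usage sketch (assumes the data paths used above are present): run the full
# preprocessing/plotting pipeline for CMAPSS sub-dataset FD001, or for the PHM 2008
# challenge data.
# analyse_Data("cmapss", files=[1], plot=False)
# analyse_Data("phm", plot=False)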
def combine_FD001_and_FD003():
column_name = ['engine_id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
's15', 's16', 's17', 's18', 's19', 's20', 's21']
train_FD001 = pd.read_table("./CMAPSSData/train_FD001.txt", header=None, delim_whitespace=True)
train_FD003 = pd.read_table("./CMAPSSData/train_FD003.txt", header=None, delim_whitespace=True)
train_FD001.columns = column_name
train_FD003.columns = column_name
FD001_max_engine_id = max(train_FD001['engine_id'])
train_FD003['engine_id'] = train_FD003['engine_id'] + FD001_max_engine_id
train_FD003.index = range(len(train_FD001), len(train_FD001) + len(train_FD003))
train_FD001_FD002 = pd.concat([train_FD001,train_FD003])
test_FD001 = pd.read_table("./CMAPSSData/test_FD001.txt", header=None, delim_whitespace=True)
test_FD003 = pd.read_table("./CMAPSSData/test_FD003.txt", header=None, delim_whitespace=True)
test_FD001.columns = column_name
test_FD003.columns = column_name
FD001_max_engine_id = max(test_FD001['engine_id'])
test_FD003['engine_id'] = test_FD003['engine_id'] + FD001_max_engine_id
test_FD003.index = range(len(test_FD001), len(test_FD001) + len(test_FD003))
test_FD001_FD002 = pd.concat([test_FD001,test_FD003])
RUL_FD001 = pd.read_table("./CMAPSSData/RUL_FD001.txt", header=None, delim_whitespace=True)
RUL_FD003 = pd.read_table("./CMAPSSData/RUL_FD003.txt", header=None, delim_whitespace=True)
RUL_FD001.columns = ['RUL']
RUL_FD003.columns = ['RUL']
RUL_FD003.index = range(len(RUL_FD001), len(RUL_FD001) + len(RUL_FD003))
RUL_FD001_FD002 = pd.concat([RUL_FD001, RUL_FD003])
return train_FD001_FD002, test_FD001_FD002, RUL_FD001_FD002
import multiprocessing
import numpy as np
import pandas as pd
from kts.validation.leaderboard import leaderboard as lb
from kts.core.backend.memory import load
from kts.core.base_constructors import merge, wrap_stl_function, empty_like
def _apply_df(args):
"""
Args:
args:
Returns:
"""
df, func, num, kw = args
return num, df.swifter.apply(func, **kw)
def apply(dataframe, function, **kwargs):
"""Applies function to dataframe faster.
If n_threads is in kwargs and is greater than 1, applies by multiprocessing.
:return: same as df.apply(function)
Args:
dataframe:
function:
**kwargs:
Returns:
"""
if "n_threads" in kwargs:
n_threads = kwargs.pop("n_threads")
else:
n_threads = 1
if n_threads == 1:
return dataframe.swifter.apply(function, **kwargs)
pool = multiprocessing.Pool(processes=n_threads)
result = pool.map(
_apply_df,
[(d, function, i, kwargs)
for i, d in enumerate(np.array_split(dataframe, n_threads))],
)
pool.close()
result = sorted(result, key=lambda x: x[0])
return pd.concat([i[1] for i in result])
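# Minimal usage sketch (toy data; assumes the swifter package is installed, since the
# implementation above relies on df.swifter.apply). With n_threads > 1 the work is
# dispatched through multiprocessing.Pool, so the function must be picklable
# (a module-level def rather than a lambda).
# def _square(row):
#     return row["x"] ** 2
# toy = pd.DataFrame({"x": range(100)})
# serial = apply(toy, _square, axis=1)
# parallel = apply(toy, _square, axis=1, n_threads=4)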
def get_categorical(df):
"""
Args:
df:
Returns:
"""
cat_features = [col for col in df.columns if df[col].dtype == object]
return cat_features
def get_numeric(df):
"""
Args:
df:
Returns:
"""
num_features = [col for col in df.columns if df[col].dtype != object]
return num_features
def stack(ids):
"""
Args:
ids:
Returns:
"""
def __stack(df):
oof_preds = merge([lb[id_exp].oof for id_exp in ids])
oof_col_names = merge([lb[id_exp].oof.columns for id_exp in ids])
try:
res = oof_preds[df.index]
if res.isna().sum().sum() > 0:
bad_indices = res.isna().sum(axis=1) > 0
preds = [lb[id_exp].predict(df[bad_indices]) for id_exp in ids]
res[bad_indices] = merge([
pd.DataFrame(data=pred, columns=col_names)
for col_names, pred in zip(oof_col_names, preds)
])
except:
preds = [lb[id_exp].predict(df) for id_exp in ids]
res = merge([
pd.DataFrame(data=pred, columns=col_names)
for col_names, pred in zip(oof_col_names, preds)
])
# Copyright (c) 2019-2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Apache License, version 2.0.
# If a copy of the Apache License, version 2.0 was not distributed with this file, you can obtain one at http://www.apache.org/licenses/LICENSE-2.0.
# SPDX-License-Identifier: Apache-2.0
# This file is part of hadar-simulator, a python adequacy library for everyone.
import unittest
import numpy as np
import pandas as pd
from pandas import MultiIndex
from hadar.workflow.pipeline import (
Stage,
FreePlug,
RestrictedPlug,
FocusStage,
Clip,
Rename,
Drop,
Fault,
RepeatScenario,
Pipeline,
)
class Double(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines * 2
class Max9(Stage):
def __init__(self):
Stage.__init__(self, FreePlug())
def _process_timeline(self, timelines: pd.DataFrame) -> pd.DataFrame:
return timelines.clip(None, 9)
class Divide(FocusStage):
def __init__(self):
Stage.__init__(self, RestrictedPlug(inputs=["a", "b"], outputs=["d", "r"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "d"] = (scenario["a"] / scenario["b"]).apply(np.floor)
scenario.loc[:, "r"] = scenario["a"] - scenario["b"] * scenario["d"]
return scenario.drop(["a", "b"], axis=1)
class Inverse(FocusStage):
def __init__(self):
FocusStage.__init__(self, RestrictedPlug(inputs=["d"], outputs=["d", "-d"]))
def _process_scenarios(self, n_scn: int, scenario: pd.DataFrame) -> pd.DataFrame:
scenario.loc[:, "-d"] = -scenario["d"]
return scenario.copy()
class Wrong(Stage):
def __init__(self):
Stage.__init__(self, plug=RestrictedPlug(inputs=["e"], outputs=["e"]))
def _process_timeline(self, timeline: pd.DataFrame) -> pd.DataFrame:
return timeline
class TestFreePlug(unittest.TestCase):
def test_linkable_to(self):
self.assertTrue(FreePlug().linkable_to(FreePlug()))
def test_join_to_fre(self):
# Input
a = FreePlug()
b = FreePlug()
# Test
c = a + b
self.assertEqual(a, c)
def test_join_to_restricted(self):
# Input
a = FreePlug()
b = RestrictedPlug(inputs=["a", "b"], outputs=["c", "d"])
# Test
c = a + b
self.assertEqual(b, c)
class TestRestrictedPlug(unittest.TestCase):
def test_linkable_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
self.assertTrue(a.linkable_to(FreePlug()))
def test_linkable_to_restricted_ok(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Test
self.assertTrue(a.linkable_to(b))
def test_linkable_to_restricted_wrong(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c", "f"], outputs=["e"])
# Test
self.assertFalse(a.linkable_to(b))
def test_join_to_free(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b"])
# Test
b = a + FreePlug()
self.assertEqual(a, b)
def test_join_to_restricted(self):
# Input
a = RestrictedPlug(inputs=["a"], outputs=["b", "c", "d"])
b = RestrictedPlug(inputs=["b", "c"], outputs=["e"])
# Expected
exp = RestrictedPlug(inputs=["a"], outputs=["e", "d"])
# Test
c = a + b
self.assertEqual(exp, c)
class TestPipeline(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
# Expected
exp = pd.DataFrame({(0, "a"): [4, 8, 12]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
def test_add(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
pipe = Pipeline(stages=[Double(), Double()])
pipe += Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [1, 1, 1], (0, "r"): [0, 0, 0]}, dtype=float)
# Test & Verify
o = pipe(i)
self.assertEqual(3, len(pipe.stages))
self.assertIsInstance(pipe.plug, RestrictedPlug)
pd.testing.assert_frame_equal(exp, o)
def test_link_pipeline_free_to_free(self):
# Input
i = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pipe = Double() + Max9()
# Expected
exp = pd.DataFrame({(0, "a"): [2, 4, 6], (0, "b"): [8, 9, 9]})
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual([], pipe.plug.inputs)
self.assertEqual([], pipe.plug.outputs)
def test_link_pipeline_free_to_restricted(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Double() + Divide()
# Expected
exp = pd.DataFrame({(0, "d"): [2, 4, 5], (0, "r"): [4, 0, 4]}, dtype="float")
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual(["a", "b"], pipe.plug.inputs)
self.assertEqual(["d", "r"], pipe.plug.outputs)
def test_link_pipeline_restricted_to_free(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Divide() + Double()
# Expected
exp = pd.DataFrame({(0, "d"): [4, 8, 10], (0, "r"): [4, 0, 4]}, dtype="float")
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual(["a", "b"], pipe.plug.inputs)
self.assertEqual(["d", "r"], pipe.plug.outputs)
def test_link_pipeline_restricted_to_restricted(self):
# Input
i = pd.DataFrame({"a": [10, 20, 32], "b": [4, 5, 6]})
pipe = Divide() + Inverse()
# Expected
exp = pd.DataFrame(
{(0, "d"): [2, 4, 5], (0, "-d"): [-2, -4, -5], (0, "r"): [2, 0, 2]},
dtype="float",
)
# Test & Verify
o = pipe(i)
pd.testing.assert_frame_equal(exp, o)
self.assertEqual({"a", "b"}, set(pipe.plug.inputs))
self.assertEqual({"d", "-d", "r"}, set(pipe.plug.outputs))
def test_wrong_link(self):
# Test & Verify
self.assertRaises(ValueError, lambda: Divide() + Wrong())
class TestStage(unittest.TestCase):
def test_compute(self):
# Input
i = pd.DataFrame(
{
(0, "a"): [1, 2, 3],
(0, "b"): [4, 5, 6],
(1, "a"): [10, 20, 30],
(1, "b"): [40, 50, 60],
}
)
stage = Double()
# Expected
exp = pd.DataFrame(
{
(0, "a"): [2, 4, 6],
(0, "b"): [8, 10, 12],
(1, "a"): [20, 40, 60],
(1, "b"): [80, 100, 120],
}
)
# Test & Verify
o = stage(i)
pd.testing.assert_frame_equal(exp, o)
def test_wrong_compute(self):
i = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
pipe = Inverse()
self.assertRaises(ValueError, lambda: pipe(i))
def test_standardize_column(self):
i = pd.DataFrame({"a": [1, 2, 3]})
# Expected
exp = pd.DataFrame({(0, "a"): [1, 2, 3]})
res = Stage.standardize_column(i)
pd.testing.assert_frame_equal(exp, res)
def test_build_multi_index(self):
# Input
scenarios = [1, 2, 3]
names = ["a", "b"]
# Expected
exp = MultiIndex.from_tuples(
[(1, "a"), (1, "b"), (2, "a"), (2, "b"), (3, "a"), (3, "b")]
)
# Test & Verify
index = Stage.build_multi_index(scenarios=scenarios, names=names)
pd.testing.assert_index_equal(exp, index)
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
| Series(["a", "b", "c", "a"]) | pandas.Series |
import numpy as np
import pandas as pd
import datetime as dt
import os
import zipfile
from datetime import datetime, timedelta
from urllib.parse import urlparse
study_prefix = "U01"
def get_user_id_from_filename(f):
#Get user id from from file name
return(f.split(".")[3])
def get_file_names_from_zip(z, file_type=None, prefix=study_prefix):
#Extract file list
file_list = list(z.filelist)
if(file_type is None):
filtered = [f.filename for f in file_list if (prefix in f.filename) and (".csv" in f.filename)]
else:
filtered = [f.filename for f in file_list if (file_type in f.filename and prefix in f.filename)]
return(filtered)
def get_data_catalog(catalog_file, data_file, data_dir, dict_dir):
dc=pd.read_csv(catalog_file)
dc=dc.set_index("Data Product Name")
dc.data_file=data_dir+data_file #add data zip file field
dc.data_dir=data_dir #add data zip file field
dc.dict_dir=dict_dir #add data distionary directory field
return(dc)
def get_data_dictionary(data_catalog, data_product_name):
dictionary_file = data_catalog.dict_dir + data_catalog.loc[data_product_name]["Data Dictionary File Name"]
dd=pd.read_csv(dictionary_file)
dd=dd.set_index("ElementName")
dd.data_file_name = data_catalog.loc[data_product_name]["Data File Name"] #add data file name pattern field
dd.name = data_product_name #add data product name field
dd.index_fields = data_catalog.loc[data_product_name]["Index Fields"] #add index fields
dd.description = data_catalog.loc[data_product_name]["Data Product Description"]
return(dd)
def get_df_from_zip(file_type,zip_file, participants):
#Get participant list from participants data frame
participant_list = list(participants["Participant ID"])
#Open data zip file
z = zipfile.ZipFile(zip_file)
#Get list of files of specified type
file_list = get_file_names_from_zip(z, file_type=file_type)
#Open file inside zip
dfs=[]
for file_name in file_list:
sid = get_user_id_from_filename(file_name)
if(sid in participant_list):
f = z.open(file_name)
file_size = z.getinfo(file_name).file_size
if file_size > 0:
df = pd.read_csv(f, low_memory=False)
df["Subject ID"] = sid
dfs.append(df)
else:
print('warning %s is empty (size = 0)' % file_name)
df = pd.concat(dfs)
return(df)
def fix_df_column_types(df, dd):
#Set Boolean/String fields to string type to prevent
#interpretation as numeric for now. Leave nans in to
#indicate missing data.
for field in list(df.keys()):
if not (field in dd.index): continue
dd_type = dd.loc[field]["DataType"]
if dd_type in ["Boolean","String","Categorical"]:
if field == 'url':
urls = df[field].values
for index, url in enumerate(urls):
parsed = urlparse(url)
df[field].values[index] = parsed.path[1:]
else:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else str(x))
elif dd_type in ["Ordinal"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else int(x))
elif dd_type in ["Time"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else pd.to_timedelta(x))
elif dd_type in ["Date"]:
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else datetime.strptime(x, "%Y-%m-%d"))
elif dd_type in ["DateTime"]:
#Keep only time for now
max_length = max([len(str(x).split(':')[-1]) for x in df[field].values]) # length of last item after ':'
if max_length < 6: # this includes time with AM/PM
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else pd.to_timedelta(x[11:]))
else: # for example: 2020-06-12 23:00:1592002802
df[field] = df[field].map(lambda x: x if str(x).lower()=="nan" else
pd.to_timedelta(pd.to_datetime(x[:16]).strftime("%H:%M:%S")))
#print('\n%s nlargest(10) =\n%s' % (field, df[field].value_counts().nlargest(10)))
return(df)
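# Minimal sketch (toy frame and data dictionary, assumed for illustration only): a
# dictionary entry typed Categorical is coerced to str with NaNs preserved, and one
# typed Time is converted element-wise to pandas Timedelta.
_dd = pd.DataFrame({'DataType': ['Categorical', 'Time']}, index=['mood', 'sleep_time'])
_df = fix_df_column_types(
pd.DataFrame({'mood': [1.0, np.nan], 'sleep_time': ['07:30:00', '08:15:00']}), _dd)
assert _df['mood'].iloc[0] == '1.0' and pd.isna(_df['mood'].iloc[1])
assert isinstance(_df['sleep_time'].iloc[0], pd.Timedelta)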
def get_participant_info(data_catalog):
file = data_catalog.data_dir + data_catalog.loc["Participant Information"]["Data File Name"]
df = pd.read_csv(file)
return(df)
def get_participants_by_type(data_catalog, participant_type):
pi = get_participant_info(data_catalog)
check_type = []
for type_i in pi["Participant Type"].values:
if str(type_i).find(participant_type) >= 0:
check_type.append(True)
else:
check_type.append(False)
pi = pi[check_type]
return(pi)
def crop_data(participants_df, df, b_display, b_crop_end=True):
#Crop before the intervention start date
#Set b_crop_end = True to also crop after the end date (for participants who withdrew)
participants_df = participants_df.set_index("Participant ID")
fields = list(df.keys())
#Create an observation indicator for an observed value in any
#of the above fields. Sort to make sure data frame is in date order
#per participant
obs_df = 0+((0+~df[fields].isnull()).sum(axis=1)>0)
obs_df.sort_index(axis=0, inplace=True,level=1)
#Get the participant ids according to the data frame
participants = list(obs_df.index.levels[0])
frames = []
for p in participants:
intervention_date = participants_df.loc[p]['Intervention Start Date']
dates = pd.to_datetime(obs_df[p].index)
#Check if there is any data for the participant
if(len(obs_df[p]))>0:
new_obs_df = obs_df[p].copy()
if str(intervention_date).lower() != "nan":
#Check if intervention date is past today's date
intervention_date = pd.to_datetime(intervention_date)
new_obs_df = new_obs_df.loc[dates >= intervention_date]
dates = pd.to_datetime(new_obs_df.index)
import copy
import pickle as pickle
import os
import sys
import time
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from numpy import array, float32
import seaborn as sns
FRAME = 10
TWINDOW = 300
TDELTA = 600 # 300
MIN_FRAME = 30
nan = np.nan
LOG_DIR = 'experiment_logs/'
prefix = os.path.expanduser('~')
SAVE_DIR = prefix+'/Dropbox/'
X_VARS = ['time', 'n_opt_calls', 'n_runs', 'n_learning_iters']
Y_VARS = ['n_success', 'opt_cost', 'tree_life']
def get_colors(n_colors):
return cm.rainbow(np.linspace(0, 1, n_colors))
def get_test_data(keywords, include, exclude, pre=False, rerun=False,
tdelta=TDELTA, wind=TWINDOW, lab='', lenthresh=0.99,
split_runs=False, label_vars=[]):
exp_probs = os.listdir(LOG_DIR)
all_data = {}
for k in keywords:
used = []
all_data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
if dir_name.find(k) < 0 and dir_prefix.find(k) < 0:
continue
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:full_dir.rfind('_')]
if full_exp in used: continue
all_data[k][full_exp] = {}
used.append(full_exp)
i = 0
data = []
while i < 20:
cur_dir = '{0}_{1}'.format(full_exp, i)
if not os.path.isdir(cur_dir):
i += 1
continue
fnames = os.listdir(cur_dir)
if pre:
info = [f for f in fnames if f.find('hl_test_pre') >= 0 and f.endswith('pre_log.npy')]
elif rerun:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
else:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
if len(info):
for fname in info:
# print(('Loading data from', fname, full_dir))
try:
data.append(np.load(cur_dir+'/'+fname))
except Exception as e:
print('Skipping', fname, full_dir)
continue
label = gen_label(cur_dir, label_vars, split_runs, i)
all_data[k][full_exp][cur_dir] = {}
all_data[k][full_exp][cur_dir][cur_dir] = []
for buf in data:
for pts in buf:
pt = pts[0]
no, nt = int(pt[4]), int(pt[5])
all_data[k][full_exp][cur_dir][cur_dir].append({'time': pt[3], 'success at end': pt[0], 'path length': pt[1], 'distance from goal': pt[2], 'n_data': pt[6], 'key': (no, nt), 'label': label, 'ind': i, 'success anywhere': pt[7], 'optimal_rollout_success': pt[9], 'number of plans': pt[10], 'subgoals anywhere': pt[11], 'subgoals closest distance': pt[12], 'collision': pt[8], 'exp id': i})
if len(pt) > 13:
all_data[k][full_exp][cur_dir][cur_dir][-1]['any target'] = pt[13]
if len(pt) > 14:
all_data[k][full_exp][cur_dir][cur_dir][-1]['smallest tolerance'] = pt[14]
if len(pt) > 16:
all_data[k][full_exp][cur_dir][cur_dir][-1]['success with postcond'] = pt[16]
if len(pt) > 17:
all_data[k][full_exp][cur_dir][cur_dir][-1]['success with adj_eta'] = pt[17]
if len(pt) > 18:
all_data[k][full_exp][cur_dir][cur_dir][-1]['episode return'] = pt[18]
# all_data[k][full_exp][cur_dir][cur_dir].append({'time': (pt[3]//tdelta+1)*tdelta, 'success at end': pt[0], 'path length': pt[1], 'distance from goal': pt[2], 'n_data': pt[6], 'key': (no, nt), 'description': label, 'ind': i, 'success anywhere': pt[7], 'optimal_rollout_success': pt[9], 'number of plans': pt[10]})
i += 1
return all_data
def get_policy_data(policy, keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
r = 'policy_{0}_log.txt'.format(policy)
rollout_data = {}
if not os.path.isfile(full_dir+'/'+r):
r = 'policy_{0}_log.pkl'.format(policy)
if not os.path.isfile(full_dir+'/'+r): continue
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
print('Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
for pt in r_data:
pt['exp id'] = 0
if type(pt['train_loss']) is dict:
pt['train_loss'] = pt['train_loss']['loss']
if type(pt['val_loss']) is dict:
pt['val_loss'] = pt['val_loss']['loss']
if 'var' in pt and type(pt['var']) is dict:
pt['var'] = pt['var'][policy]
rollout_data[r] = r_data
data[k][full_exp][full_dir] = rollout_data
return data
def get_motion_data(keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
file_names = [fname for fname in file_names if fname.find('MotionInfo') >= 0]
rollout_data = {'motion': []}
for r in file_names:
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
for pt in r_data:
pt['exp id'] = 0
if full_exp.find('full_feed') >= 0:
pt['opt duration per ts'] = 1.4 / pt['opt duration per ts']
pt['description'] = 'RoboSuite'
elif full_exp.find('panda') >= 0:
pt['opt duration per ts'] = 1.4 / pt['opt duration per ts']
pt['description'] = 'RoboDesk'
print('MOTION: Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
rollout_data['motion'].extend(r_data)
data[k][full_exp][full_dir] = rollout_data
return data
def get_rollout_info_data(keywords=[], exclude=[], include=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
full_dir = dir_prefix + dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
skip = False
for ekey in exclude:
if full_dir.find(ekey) >= 0:
skip = True
if len(include):
skip = True
for inc in include:
if dir_name.find(inc) >= 0 or dir_prefix.find(inc) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for exc in exclude:
if dir_name.find(exc) >= 0 or dir_prefix.find(exc) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
file_names = [fname for fname in file_names if fname.find('RolloutInfo') >= 0]
rollout_data = {'rollout': []}
for r in file_names:
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
next_data = str.split(next_data, '\n\n')
try:
r_data = [eval(d) for d in next_data if len(d)]
except:
continue
for pt in r_data:
pt['exp id'] = 0
goal_vals = pt.get('per_goal_success', {'basegoal': 0.})
for goal in goal_vals:
new_pt = copy.copy(pt)
new_pt['goal'] = goal
new_pt['success rate'] = goal_vals[goal]
rollout_data['rollout'].append(new_pt)
print('ROLLOUT: Loading {0} pts for {1}'.format(len(r_data), full_dir+'/'+r))
#rollout_data['rollout'].extend(r_data)
data[k][full_exp][full_dir] = rollout_data
return data
def get_rollout_data(keywords=[], nfiles=20, exclude=[]):
exp_probs = os.listdir(LOG_DIR)
data = {}
for k in keywords:
data[k] = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
full_dir = dir_prefix + dir_name
if not os.path.isdir(full_dir) or full_dir.find(k) < 0: continue
full_exp = full_dir[:-1]
if full_exp not in data[k]:
data[k][full_exp] = {}
file_names = os.listdir(full_dir)
rollout_logs = ['rollout_log_{0}_True.txt'.format(i) for i in range(nfiles)]# [f for f in file_names if f.startswith('rollout')]
rollout_logs += ['rollout_log_{0}_False.txt'.format(i) for i in range(nfiles)]
rollout_data = {}
for r in rollout_logs:
if not os.path.isfile(full_dir+'/'+r): continue
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
try:
r_data = eval(next_data)
except:
continue
for pt in next_data:
pt['exp id'] = 0
rollout_data[r] = r_data
else:
print(('no data for', r))
data[k][full_exp][full_dir] = rollout_data
return data
def gen_first_success_plots(x_var='time'):
exp_probs = os.listdir(LOG_DIR)
master_plot = []
all_data = []
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
full_dir = dir_prefix + dir_name
with open(full_dir+'/exp_info.txt', 'r') as f:
exp_info = f.read()
file_names = os.listdir(full_dir)
rollout_logs = [f for f in file_names if f.startswith('rollout') and f.endswith('False.txt')]
rollout_data = {}
ts_to_data = {}
for i, r in enumerate(rollout_logs):
print((exp_name, dir_name, r))
with open(full_dir+'/'+r, 'r') as f:
next_data = f.read()
if len(next_data):
costs = eval(next_data)
if 'avg_first_success' not in costs[0]: continue
for j, c in enumerate(costs):
c['ind'] = np.mean(c['avg_first_success'])
if j not in ts_to_data:
ts_to_data[j] = []
ts_to_data[j].append(c)
rollout_data[r] = costs
xs = [np.mean([c[x_var] for c in ts_to_data[n]]) for n in ts_to_data]
ys = [np.mean([c['ind'] for c in ts_to_data[n]]) for n in ts_to_data]
all_data.append((exp_name+dir_name, xs, ys))
plt.title('Steps to first success')
plt.xlabel(x_var)
plt.ylabel('Avg. step of first success')
colors = get_colors(len(all_data))
for i, (label, xs, ys) in enumerate(all_data):
plt.plot(xs, ys, label=label, color=colors[i])
plt.savefig(SAVE_DIR+'/goal_vs_{0}.png'.format(x_var), pad_inches=0.01)
plt.clf()
def get_td_loss(keywords=[], exclude=[], pre=False):
tdelta = 5
exp_probs = os.listdir(LOG_DIR)
exp_data = {}
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
# exp_data = []
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(keywords):
skip = True
for k in keywords:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = False
if skip: continue
if len(exclude):
skip = False
for k in exclude:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = True
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:-1]
i = 0
data = []
while i < 20:
if not os.path.isdir(full_exp+str(i)):
i += 1
continue
fnames = os.listdir(full_exp+str(i))
info = [f for f in fnames if f.find('td_error') >= 0 and f.endswith('npy') and f.find('test') < 0]
if len(info):
cur_data = []
for step in info:
cur_data.append(np.load(full_exp+str(i)+'/'+step))
dlen = max([len(dt) for dt in cur_data])
data.append([])
for n in range(dlen):
data[-1].append(np.mean([cur_data[ind][n] for ind in range(len(cur_data)) if n < len(cur_data[ind])], axis=0))
data[-1] = np.array(data[-1])
i += 1
if not len(data):
print(('skipping', full_exp))
continue
dlen = min([len(d) for d in data])
dmax = max([len(d) for d in data])
print(('Gathering data for', full_exp, 'length:', dlen, 'all len:', [len(d) for d in data]))
end = False
cur_t = 0
while not end:
end = True
for d in data:
next_frame = d[cur_t:cur_t+tdelta] # [pt[0] for pt in d if pt[0,3] >= cur_t and pt[0,3] <= cur_t + TWINDOW]
if len(next_frame):
end = False
if len(next_frame) >= MIN_FRAME:
next_pt = np.mean(next_frame, axis=0)
no, nt = 0, 0 # int(next_pt[4]), int(next_pt[2])
if (no, nt) not in exp_data:
exp_data[no, nt] = []
exp_data[no, nt].append((full_exp, cur_t, next_pt[0]))
cur_t += tdelta
'''
for i in range(dmax - FRAME):
cur_t = np.mean([d[i:i+FRAME,:,3] for d in data if i+FRAME < len(d)])
for d in data:
if len(d) < i+FRAME: continue
cur_fr = np.mean(d[i:i+FRAME], axis=0)
for pt in cur_fr:
val = pt[0]
# cur_t = pt[3]
nt = int(pt[2])
no = int(pt[4])
if (no, nt) not in exp_data:
exp_data[no, nt] = []
exp_data[no, nt].append((full_exp, cur_t, val))
'''
for no, nt in exp_data:
print('Plotting', no, nt, exp_name)
pd_frame = pd.DataFrame(exp_data[no, nt], columns=['exp_name', 'time', 'value'])
sns.set()
sns_plot = sns.relplot(x='time', y='value', hue='exp_name', kind='line', data=pd_frame)
keyid = ''
for key in keywords:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
sns_plot.savefig(SAVE_DIR+'/{0}obj_{1}targ_td_error{2}{3}.png'.format(no, nt, keyid, pre_lab))
def get_fail_info(keywords=[], exclude=[], pre=False, rerun=False, xvar='time', avg_time=True, tdelta=TDELTA, wind=TWINDOW, lab='', lenthresh=0.99, label_vars=[], include=[], max_t=14400):
exp_probs = os.listdir(LOG_DIR)
exp_data = {}
exp_len_data = {}
exp_dist_data = {}
exp_true_data = {}
targets = {}
used = []
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(keywords):
skip = False
for k in keywords:
if dir_name.find(k) < 0 and dir_prefix.find(k) < 0:
skip = True
if skip: continue
if len(include):
skip = True
for k in include:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = False
if skip: continue
print(dir_name)
if len(exclude):
skip = False
for k in exclude:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:full_dir.rfind('_')]
if full_exp in used: continue
used.append(full_exp)
i = 0
data = []
while i < 20:
cur_dir = '{0}_{1}'.format(full_exp, i)
if not os.path.isdir(cur_dir):
i += 1
continue
fnames = os.listdir(cur_dir)
info = [f for f in fnames if f.find('failure') >= 0 and f.endswith('data.txt')]
if len(info):
for fname in info:
print(('Loading data from', fname))
with open(cur_dir+'/'+fname, 'r') as f:
data.append(f.read().splitlines())
label = gen_label(cur_dir, label_vars)
for buf in data:
for pts in buf:
pts = eval(pts)
no, nt = int(pts['no']), int(pts['nt'])
if (no,nt) not in exp_data: exp_data[no,nt] = []
if (no,nt) not in targets: targets[no,nt] = []
pts['exp_name'] = label
exp_data[no,nt].append(pts)
targs = pts['goal']
targinfo = []
for targind in range(len(targs)):
if targs[targind] == 1:
targinfo.append(targind)
targets[no,nt].append([tuple(targinfo)])
i += 1
for no, nt in targets:
print(('Plotting', no, nt, exp_name))
pd_frame = pd.DataFrame(targets[no, nt], columns=['targets']) #'target_{0}'.format(i) for i in range(no)])
print((pd_frame['targets'].value_counts()[:10]))
sns.set()
plt.clf()
sns_plot = sns.countplot(x="value", hue="variable", data=pd.melt(pd_frame))
keyid = ''
for key in keywords[:1]:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
if rerun: pre_lab += '_rerun'
plt.savefig(SAVE_DIR+'/{0}obj_{1}targ_failedtargets{2}{3}{4}.png'.format(no, nt, keyid, pre_lab, lab))
plt.clf()
def get_hl_tests(keywords=[], exclude=[], pre=False, rerun=False, xvar='time', avg_time=True, tdelta=TDELTA, wind=TWINDOW, lab='', lenthresh=0.99, label_vars=[], include=[], max_t=14400):
exp_probs = os.listdir(LOG_DIR)
all_data = []
exp_data = {}
exp_len_data = {}
exp_dist_data = {}
exp_true_data = {}
used = []
for exp_name in exp_probs:
dir_prefix = LOG_DIR + exp_name + '/'
if not os.path.isdir(dir_prefix): continue
exp_dirs = os.listdir(dir_prefix)
for dir_name in exp_dirs:
d = dir_name
if d.find('.') >= 0 or d.find('trained') >= 0: continue
if len(keywords):
skip = False
for k in keywords:
if dir_name.find(k) < 0 and dir_prefix.find(k) < 0:
skip = True
if skip: continue
if len(include):
skip = True
for k in include:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = False
if skip: continue
print(dir_name)
if len(exclude):
skip = False
for k in exclude:
if dir_name.find(k) >= 0 or dir_prefix.find(k) >= 0:
skip = True
print(('skipping', dir_name))
if skip: continue
full_dir = dir_prefix + dir_name
full_exp = full_dir[:full_dir.rfind('_')]
if full_exp in used: continue
used.append(full_exp)
i = 0
data = []
while i < 20:
cur_dir = '{0}_{1}'.format(full_exp, i)
if not os.path.isdir(cur_dir):
i += 1
continue
fnames = os.listdir(cur_dir)
if pre:
info = [f for f in fnames if f.find('hl_test_pre') >= 0 and f.endswith('pre_log.npy')]
elif rerun:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
else:
info = [f for f in fnames if f.find('hl_test') >= 0 and f.endswith('test_log.npy')]
if len(info):
for fname in info:
print(('Loading data from', fname))
data.append(np.load(cur_dir+'/'+fname))
label = gen_label(cur_dir, label_vars)
label = label.replace('_', ' ')
for buf in data:
for pts in buf:
pt = pts[0]
no, nt = int(pt[4]), int(pt[5])
if (no,nt) not in exp_data: exp_data[no,nt] = []
if (no,nt) not in exp_len_data: exp_len_data[no,nt] = []
if (no,nt) not in exp_dist_data: exp_dist_data[no,nt] = []
if (no,nt) not in exp_true_data: exp_true_data[no,nt] = []
if xvar == 'time':
xval = (pt[3] // tdelta) * tdelta
if xval < max_t:
exp_data[no, nt].append((label, xval, pt[0]))
exp_data[no, nt].append((label, xval+tdelta, pt[0]))
if pt[0] > lenthresh:
exp_len_data[no, nt].append((label, xval, pt[1]))
if pt[0] <= 0.5: exp_dist_data[no, nt].append((label, xval, pt[2]))
if len(pt) > 7: exp_true_data[no, nt].append((label, xval, pt[7]))
elif rerun:
exp_data[no, nt].append((label, pt[-1], pt[0]))
if pt[0] > lenthresh:
exp_len_data[no, nt].append((label, pt[-1], pt[1]))
elif xvar == 'N' or xvar == 'n_data':
exp_data[no, nt].append((label, pt[6], pt[0]))
if pt[0] > lenthresh:
exp_len_data[no, nt].append((label, pt[6], pt[1]))
all_data.append({'time': (pt[3]//tdelta)*tdelta, 'value': pt[0], 'len': pt[1], 'dist': pt[2], 'N': pt[6], 'problem': '{0} object {1} goals'.format(no, nt), 'description': label, 'ind': i})
i += 1
pd_frame = pd.DataFrame(all_data, columns=['time', 'description', 'N', 'value', 'len', 'dist', 'problem', 'ind'])
# pd_frame = pd_frame.groupby(['time', 'description', 'problem', 'ind'], as_index=False).mean()
sns.set()
sns.set_context('paper', font_scale=2)
plt.title('3 object with 3 goals')
fig = plt.figure(figsize=(10,6))
axs = fig.subplots(ncols=3)
sns_plot = sns.relplot(x=xvar, y='value', hue='description', row='problem', kind='line', data=pd_frame)
sns_plot.fig.set_figwidth(10)
sns_plot._legend.remove()
sns_plot.fig.get_axes()[0].legend(loc=(0., -0.6), prop={'size': 15})
sns_plot.fig.axes[0].set_title('value')
l, b, w, h = sns_plot.fig.axes[0]._position.bounds
sns_plot.fig.add_axes((l+w+0.1, b, w, h))
sns_plot_2 = sns.relplot(x=xvar, y='dist', hue='description', row='problem', kind='line', data=pd_frame, legend=False, ax=sns_plot.fig.axes[1])
sns_plot.fig.axes[1].set_title('distance')
l, b, w, h = sns_plot.fig.axes[1]._position.bounds
sns_plot.fig.add_axes((l+w+0.1, b, w, h))
sns_plot_2 = sns.relplot(x=xvar, y='len', hue='description', row='problem', kind='line', data=pd_frame, legend=False, ax=sns_plot.fig.axes[2])
sns_plot.fig.axes[2].set_title('length')
keyid = ''
for key in keywords:
keyid += '_'+str(key)
sns_plot.fig.savefig(SAVE_DIR+'/allgraphs_{0}_{1}.png'.format(keyid, lab), bbox_inches="tight")
for no, nt in exp_data:
print(('Plotting', no, nt, exp_name))
pd_frame = pd.DataFrame(exp_data[no, nt], columns=['exp_name', xvar, 'value'])
sns.set()
sns_plot = sns.relplot(x=xvar, y='value', hue='exp_name', kind='line', data=pd_frame)
sns_plot._legend.remove()
sns_plot.fig.get_axes()[0].legend(loc=(1.5, 1.5))
keyid = ''
for key in keywords[:1]:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
if rerun: pre_lab += '_rerun'
sns_plot.fig.savefig(SAVE_DIR+'/{0}obj_{1}targ_val{2}{3}{4}.png'.format(no, nt, keyid, pre_lab, lab), bbox_inches="tight")
for no, nt in exp_len_data:
print(('Plotting', no, nt, exp_name))
pd_frame = pd.DataFrame(exp_len_data[no, nt], columns=['exp_name', xvar, 'length'])
sns.set()
sns_plot = sns.relplot(x=xvar, y='length', hue='exp_name', kind='line', data=pd_frame)
keyid = ''
for key in keywords[:1]:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
if rerun: pre_lab += '_rerun'
sns_plot.savefig(SAVE_DIR+'/{0}obj_{1}targ_len{2}{3}{4}.png'.format(no, nt, keyid, pre_lab, lab))
for no, nt in exp_dist_data:
print(('Plotting', no, nt, exp_name))
pd_frame = pd.DataFrame(exp_dist_data[no, nt], columns=['exp_name', xvar, 'disp'])
sns.set()
sns_plot = sns.relplot(x=xvar, y='disp', hue='exp_name', kind='line', data=pd_frame)
keyid = ''
for key in keywords[:1]:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
if rerun: pre_lab += '_rerun'
sns_plot.savefig(SAVE_DIR+'/{0}obj_{1}targ_disp{2}{3}{4}.png'.format(no, nt, keyid, pre_lab, lab))
for no, nt in exp_true_data:
print(('Plotting', no, nt, exp_name))
pd_frame = pd.DataFrame(exp_true_data[no, nt], columns=['exp_name', xvar, 'true'])
sns.set()
sns_plot = sns.relplot(x=xvar, y='true', hue='exp_name', kind='line', data=pd_frame)
keyid = ''
for key in keywords[:-1]:
keyid += '_{0}'.format(key)
pre_lab = '_pre' if pre else ''
if rerun: pre_lab += '_rerun'
sns_plot.savefig(SAVE_DIR+'/{0}obj_{1}targ_true{2}{3}{4}.png'.format(no, nt, keyid, pre_lab, lab))
def plot(data, columns, descr, xvars, yvars, separate=True, keyind=0, inter=100, rolling=True, window=100, xlim=None, ylim=None, fname='', witherr=False, style=None):
sns.set()
for k in data:
if not len(data[k]): continue
pd_frame = pd.DataFrame(data[k], columns=columns)
datetime_var = xvars[0]+'_datetime'
pd_frame[datetime_var] = | pd.to_datetime(pd_frame[xvars[0]], unit='s') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
# In[66]:
# Import required packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# Load in the data
df = pd.read_csv("InterestsSurvey.csv")
# Check for missing values
null = df.isnull().sum()
print('number of missing values')
print(null)
# Data pre-processing
# Handle missing values
null = df.isnull().sum()
print('number of missing values')
print(null)
# Fill missing values with 0
df = df.fillna(0)
# Data pre-processing
# Remove outliers
# Respondents who selected very few or very many interests make clustering harder
df['grand_tot_interests'].describe()
# Mean is 37.31, minimum is 1, maximum is 104
# Find Q1 and Q3
q1, q3 = np.percentile(df['grand_tot_interests'], [25, 75])
print(f"Q1 is: {q1}, Q3 is: {q3}\n")
# Q1 = 28, Q3 = 48
# Draw a box plot to check whether outliers exist
df['grand_tot_interests'].plot.box(title="Box Chart")
plt.grid(linestyle="--", alpha=0.3)
plt.show()
# Find the upper and lower bounds for flagging outliers
above = q3 + 1.5 * (q3 - q1)
below = q1 - 1.5 * (q3 - q1)
print(f"Above is: {above}, Below is: {below}\n")
# above is 78, below is -2
# Filter out the outliers
df = df[df['grand_tot_interests'] <= above]
df = df[df['grand_tot_interests'] >= below]
# Draw a box plot to confirm the outliers were removed
df['grand_tot_interests'].plot.box(title="Box Chart")
plt.grid(linestyle="--", alpha=0.3)
plt.show()
# Data pre-processing
# One-hot encode the categorical 'group' column
one_hot = pd.get_dummies(df['group'])
df = df.drop('group',axis = 1)
# Join the encoded df
df = df.join(one_hot)
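# pd.get_dummies expands the categorical 'group' column into one 0/1 indicator column per
# distinct value; joining them back leaves an all-numeric frame for the variance filtering
# and PCA steps below.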
# Dimensionality reduction
# Drop low-variance features
# Remove features where more than 80% of the samples take the same value (0 or 1); for a Bernoulli variable the variance is p(1-p).
from sklearn.feature_selection import VarianceThreshold
def variance_threshold_selector(data, threshold=(.8 * (1 - .8))):
selector = VarianceThreshold(threshold)
selector.fit(data)
return data[data.columns[selector.get_support(indices=True)]]
df = variance_threshold_selector(df)
df.shape #(6331, 53)
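# With the default threshold of .8 * (1 - .8) = 0.16, a binary feature is dropped when
# more than 80% of the samples share the same value, since its Bernoulli variance
# p * (1 - p) then falls below 0.16; here the filter leaves 53 columns.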
# Dimensionality reduction
# PCA
x = df.drop(['grand_tot_interests'],axis=1)
# Decide how many principal components to keep
pca = PCA(n_components=20)
principalComponents = pca.fit_transform(x)
# Plot the explained variances
features = range(pca.n_components_)
plt.bar(features, pca.explained_variance_ratio_, color='black')
plt.xlabel('PCA features')
plt.ylabel('variance %')
plt.xticks(features)
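# (Optional check, not part of the original notebook) the same decision can be read off
# numerically from the cumulative explained-variance ratio:
# print(np.cumsum(pca.explained_variance_ratio_)[:5])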
# Conclusion: two components are enough
# Fit PCA with two components
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
# Save components to a DataFrame
PCA_components = pd.DataFrame(principalComponents)
# Inspect which original features drive the two main principal components
pcs = np.array(pca.components_) # (n_comp, n_features)
df_pc = | pd.DataFrame(pcs, columns=df.columns[1:]) | pandas.DataFrame |
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d, CubicSpline
from pandas.tseries.offsets import DateOffset
from pennies.market.curves import DiscountCurveWithNodes
from pennies.market.market import RatesTermStructure
# 1. Define Valuation Date
dt_val = pd.to_datetime('today')
# 2. Define the Market Prices to Calibrate to.
# In our example, we create 3 Swaps, with an upward sloping yield
dt_settle = dt_val
frqncy = 6
curr = "USD"
durations = [12, 24, 60]
fixed_rates = [0.03, 0.04, 0.05] # These are the 'prices'
notional = 100
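# The swaps above are the calibration targets: 12-, 24- and 60-month maturities with
# semi-annual fixed legs (frqncy = 6) paying 3%, 4% and 5%, i.e. the upward-sloping
# yield mentioned above.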
def test_rates_term_structure():
freq = DateOffset(months=6)
sched = pd.date_range(dt_val, dt_val + | DateOffset(months=24) | pandas.tseries.offsets.DateOffset |
import re
import pandas as pd
from src.mapping.columns.column_label_catalog import ColumnLabelCatalog
from src.mapping.columns.column_name_classifier import ColumnNameClassifier
class PseudocolumnGenerator:
def __init__(self, data_source, concatenation_delimiter: str = " "):
"""Initialize new pseudocolumn generator."""
self.data_source = data_source
self.delimiter = concatenation_delimiter
if not self.data_source.is_structured():
raise Exception("Can't create pseudocolumns from unstructured data")
def generate(self):
"""Generate columns based on expected combinations."""
self._get_canonical_columns()
# Name groups
if (
self._can_generate_full_name()
and ColumnLabelCatalog.FULL_NAME not in self.canonical_columns
):
self.data_source.append_column(self._generate_full_name(), "full_name")
elif (
self._can_generate_first_last_name()
and ColumnLabelCatalog.FULL_NAME not in self.canonical_columns
):
self.data_source.append_column(
self._generate_first_last_name(), "full_name"
)
def _get_canonical_columns(self):
"""Retrieve canonical column names to concatenate."""
self.canonical_columns = ColumnNameClassifier.get_id_info_from_df(
self.data_source.get_data()
)
def _can_generate_full_name(self):
"""Check if we can generate a full name from the existing data."""
return (
ColumnLabelCatalog.FIRST_NAME in self.canonical_columns
and ColumnLabelCatalog.MIDDLE_NAME in self.canonical_columns
and ColumnLabelCatalog.LAST_NAME in self.canonical_columns
)
def _can_generate_first_last_name(self):
"""Check if we can generate a full name from the existing data."""
return (
ColumnLabelCatalog.FIRST_NAME in self.canonical_columns
and ColumnLabelCatalog.MIDDLE_NAME not in self.canonical_columns
and ColumnLabelCatalog.LAST_NAME in self.canonical_columns
)
def _generate_full_name(self):
"""Generate a full name column if we have the appropriate input data."""
first = self.data_source.get_column(
self.canonical_columns[ColumnLabelCatalog.FIRST_NAME].table_column_name
)
middle = self.data_source.get_column(
self.canonical_columns[ColumnLabelCatalog.MIDDLE_NAME].table_column_name
)
last = self.data_source.get_column(
self.canonical_columns[ColumnLabelCatalog.LAST_NAME].table_column_name
)
return [
self._join_name_pieces([first[i], middle[i], last[i]])
for i in range(len(first))
]
def _generate_first_last_name(self):
"""Generate a full name column if we have the appropriate input data."""
first = self.data_source.get_column(
self.canonical_columns[ColumnLabelCatalog.FIRST_NAME].table_column_name
)
last = self.data_source.get_column(
self.canonical_columns[ColumnLabelCatalog.LAST_NAME].table_column_name
)
return [self._join_name_pieces([first[i], last[i]]) for i in range(len(first))]
def _join_name_pieces(self, name_pieces):
for i in range(len(name_pieces)):
if | pd.isnull(name_pieces[i]) | pandas.isnull |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
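# Fixtures: `price` is a five-day close series 1..5 starting 2020-01-01, `price_wide`
# tiles it into three columns 'a', 'b', 'c', and the big_price variants hold 1000 days
# of uniform random prices for the heavier tests.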
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
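# For reference (assumed behaviour of the builders, not asserted by the test): with
# group_lens [1, 2, 3, 4] the Default type repeats 0..len-1 within each group, so every
# row of the (10, 10) call sequence reads [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]; Reversed flips
# each group and Random shuffles it row by row.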
# ############# from_orders ############# #
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
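# These thin wrappers only pin the `direction` argument; e.g. from_orders_both() is
# shorthand for vbt.Portfolio.from_orders(price, order_size, direction='both') with the
# fixtures defined above.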
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
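    # Orders smaller than min_size are not executed: with unit-sized orders
    # and min_size=2 (third column), no order records are produced at all.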
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
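    # max_size caps the size of every order, so max_size=0.5 turns the
    # unit-sized orders into fills of 0.5.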
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
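    # reject_prob randomly rejects orders with the given probability; the
    # fixed seed keeps the outcome reproducible, and reject_prob=1 rejects
    # every order (no records for the third column).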
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
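    # lock_cash controls whether free cash may go negative within a group
    # that shares cash: with lock_cash=False the short sales on the first bar
    # push free cash to -49.5, while with lock_cash=True free cash stays at
    # or above zero, which also shrinks the orders that can be filled later.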
def test_lock_cash(self):
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
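    # allow_partial=False rejects any order that cannot be filled in full
    # instead of filling as much as the available cash or position allows;
    # only the orders that fit entirely (e.g. the 1000-unit sells) remain.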
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
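    # raise_reject=True turns rejected orders into exceptions: with
    # allow_partial=True the oversized orders are simply filled partially,
    # while allow_partial=False makes them raise.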
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
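    # log=True additionally records one detailed log row (log_dt) per order
    # request, including requests that result in no order (note the NaN-sized
    # rows), capturing the full order context and the resulting status.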
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
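    # group_by merges columns into groups: order records stay per column, but
    # init_cash is reported per group (200 for the two-column group, 100 for
    # the single column), and cash is not shared unless explicitly requested.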
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
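    # cash_sharing=True lets all columns of a group draw from one cash
    # balance, so the second column of the shared group gets no fills after
    # the first column has spent the cash; regrouping a cash-sharing
    # portfolio with group_by=False is not allowed.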
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
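    # call_seq sets the per-row order in which columns of a group are
    # processed when cash is shared: the default goes left to right,
    # 'reversed' right to left, 'random' shuffles each row, and 'auto' is
    # meant to execute cash-freeing (sell) calls before buys.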
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
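    # size_type='value' interprets size as the cash value to trade, so the
    # filled amount is value / price: an order of value 1 buys 1.0 at price
    # 1, 0.5 at price 2, 0.25 at price 4 and 0.2 at price 5.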
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
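    # size_type='targetamount' targets a final position of the given number
    # of units, so a single order per column is enough; with cash sharing the
    # second column only receives the 25 units the remaining cash can buy.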
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
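    # size_type='targetvalue' keeps the position worth a fixed cash amount:
    # as the price rises from 1 to 2, the 50-unit position is trimmed by 25
    # units to stay at a value of 50, and so on each bar.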
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
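    # size_type='targetpercent' keeps the position at a fraction of the
    # current portfolio value, rebalancing on every bar as both the price and
    # the portfolio value change.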
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
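    # update_value=True revalues the group after every filled order within a
    # bar, so later columns size their orders against the updated value; it
    # only matters with grouping and cash sharing, which is why the first,
    # ungrouped comparison yields identical records for both settings.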
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
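    # size_type='percent' spends the given fraction of the currently
    # available resources on each order: 50% of 100 cash buys 50 units at
    # price 1, the next order uses 50% of the remaining 50 cash (12.5 units
    # at price 2), and so on.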
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
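    # With call_seq='auto' and cash sharing, rebalancing towards target
    # values or percentages hits the targets exactly, as positions appear to
    # be reduced before others are increased.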
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
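    # max_orders and max_logs bound the preallocated record arrays: passing a
    # value smaller than the number of records actually generated raises.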
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
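# Helper factories for the from_signals tests: the plain variants pass one
# entries/exits pair plus a direction, while the *_ls variants express the
# same behavior through separate long and short signal arrays (e.g. 'both'
# maps the exit signals to short entries).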
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
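    # signal_func_nb lets a custom numba function emit the four signal flags
    # (long entry/exit, short entry/exit) per element; here positive and
    # negative numbers in the broadcast arrays play the role of entries and
    # exits, and the result must match the explicit signal arrays of pf_base.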
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
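    # With from_signals, size is the amount traded per signal: 0 produces no
    # orders, 1 trades one unit, and np.inf trades as much as cash (or the
    # open position) allows; the reversal in 'both' closes the long and opens
    # a short in a single order, hence the doubled size at the exit signal.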
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
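    # With size_type='percent', direction='both' rejects the default reversal
    # behavior (presumably because the fraction to reverse into is
    # ambiguous); switching upon_opposite_entry to 'close' makes it valid.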
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
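    # price overrides the execution price per bar: np.inf fills at the
    # current close, while -np.inf fills at the previous close, so the entry
    # at the very first bar is deferred to the next one.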
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
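    # val_price is the price used to value the asset when sizing by value:
    # np.inf appears to resolve to the current price and -np.inf to the
    # previous one (the comparisons pair them with price and its shifted
    # version), while ffill_val_price decides whether NaN valuation prices
    # are forward-filled before use.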
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
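    # accumulate controls how repeated signals are handled while a position
    # is open: 'disabled' ignores them, 'addonly' only lets entries increase
    # the position, 'removeonly' only lets exits reduce it, and 'both'
    # allows both.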
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
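    # The upon_*_conflict options resolve bars where an entry and an exit
    # signal occur together: keep the entry, keep the exit, keep the signal
    # adjacent to the current position, keep the opposite one, or ignore
    # both.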
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
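    # upon_opposite_entry decides what an entry in the opposite direction
    # does to an open position: ignore it, close the position, close/reduce
    # it, reverse it, or reverse/reduce it; reversing trades twice the size
    # (2 units here) because the old unit position is closed and a new one is
    # opened in the same order.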
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
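    # init_cash sets the starting capital per column: with zero cash no long
    # position can be opened, while a short can still be sold and the cash it
    # raises bounds how much can later be bought back; init_cash=np.inf is
    # rejected here.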
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
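# call_seq='auto' re-sorts the columns within each cash-sharing group at every bar so
# that calls freeing up cash (exits) run before calls that spend it; the call_seq
# arrays asserted below record the resulting per-bar ordering.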
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
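# With close falling from 5 to 1, a 10% stop is hit on the second bar
# (close 4.0 <= 5 * 0.9) and a 50% stop on the fourth bar (close 2.0 <= 5 * 0.5),
# while np.nan and np.inf disable the stop entirely.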
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
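# sl_trail=True turns the stop into a trailing stop measured from the running peak:
# after close peaks at 5, a 10% trail exits on the third bar (close 4.0 <= 5 * 0.9)
# and a 50% trail on the last bar (close 2.0 <= 5 * 0.5).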
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
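# stop_entry_price picks the reference price from which sl_stop is measured
# ('val_price', 'price', 'fillprice' or 'close'), while stop_exit_price picks the
# fill price of the stop order itself; only the exit rows differ between the
# four variants below.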
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
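# upon_stop_exit decides what the stop does to the position: 'close' and
# 'closereduce' flatten (or reduce) it, while 'reverse' and 'reversereduce' flip it,
# which is why the 'reverse' column records a sell of 2 units
# (1 to close the long, 1 to open the short).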
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
sl_stop = pd.Series([0.4, np.nan, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override', 'overridenan']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 2, 2.0, 3.0, 0.0, 1),
(6, 2, 0, 1.0, 5.0, 0.0, 0), (7, 2, 1, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
sl_stop = pd.Series([0.4, 0.4, np.nan, np.nan, np.nan])
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits, accumulate=True, size=1.,
sl_stop=sl_stop, upon_stop_update=[['keep', 'override']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 0), (2, 0, 2, 2.0, 3.0, 0.0, 1),
(3, 1, 0, 1.0, 5.0, 0.0, 0), (4, 1, 1, 1.0, 4.0, 0.0, 0), (5, 1, 3, 2.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_sl_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
@njit
def adjust_sl_func_nb(c, dur):
return (0. if c.i - c.init_i >= dur else c.curr_stop), c.curr_trail  # tuple: (adjusted stop, unchanged trailing flag)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=np.inf, adjust_sl_func_nb=adjust_sl_func_nb, adjust_sl_args=(2,)).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0), (1, 0, 2, 20.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_adjust_ts_func(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([10., 11., 12., 11., 10.], index=price.index)
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
import copy
import logging
import numpy as np
import pandas as pd
from typing import Any, Callable, Dict, List, Optional, Union
from sklearn.utils import check_consistent_length
import warnings
from functools import wraps
from fairlearn.metrics._input_manipulations import _convert_to_ndarray_and_squeeze
from ._function_container import FunctionContainer, _SAMPLE_PARAMS_NOT_DICT
from ._group_feature import GroupFeature
logger = logging.getLogger(__name__)
_SUBGROUP_COUNT_WARNING_THRESHOLD = 20
_SF_DICT_CONVERSION_FAILURE = "DataFrame.from_dict() failed on sensitive features. " \
"Please ensure each array is strictly 1-D."
_BAD_FEATURE_LENGTH = "Received a feature of length {0} when length {1} was expected"
_SUBGROUP_COUNT_WARNING = "Found {0} subgroups. Evaluation may be slow"
_FEATURE_LIST_NONSCALAR = "Feature lists must be of scalar types"
_FEATURE_DF_COLUMN_BAD_NAME = "DataFrame column names must be strings. Name '{0}' is of type {1}"
_DUPLICATE_FEATURE_NAME = "Detected duplicate feature name: '{0}'"
_TOO_MANY_FEATURE_DIMS = "Feature array has too many dimensions"
_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT = \
"Keys in 'sample_params' do not match those in 'metric'"
def _deprecate_metric_frame_init(new_metric_frame_init):
"""Issue deprecation warnings for the `MetricFrame` constructor.
Decorator to issue warnings if called with positional arguments
or with the keyword argument `metric` instead of `metrics`.
Parameters
----------
new_metric_frame_init : callable
New MetricFrame constructor.
"""
@wraps(new_metric_frame_init)
def compatible_metric_frame_init(self, *args, metric=None, **kwargs):
positional_names = ["metrics", "y_true", "y_pred"]
version = "0.10.0"
positional_dict = dict(zip(positional_names, args))
# If more than 3 positional arguments are provided (apart from self), show
# the error message applicable to the new constructor implementation (with `self`
# being the only positional argument).
if len(args) > 3:
raise TypeError(f"{new_metric_frame_init.__name__}() takes 1 positional "
f"argument but {1+len(args)} positional arguments "
f"were given")
# If 1-3 positional arguments are provided (apart from self), issue warning.
if len(args) > 0:
args_msg = ", ".join([f"'{name}'" for name in positional_dict.keys()])
warnings.warn(f"You have provided {args_msg} as positional arguments. "
f"Please pass them as keyword arguments. From version "
f"{version} passing them as positional arguments "
f"will result in an error.",
FutureWarning)
# If a keyword argument `metric` is provided, issue warning.
metric_arg_dict = {}
if metric is not None:
metric_arg_dict = {"metrics": metric}
warnings.warn(f"The positional argument 'metric' has been replaced "
f"by a keyword argument 'metrics'. "
f"From version {version} passing it as a positional argument "
f"or as a keyword argument 'metric' will result in an error",
FutureWarning)
# Call the new constructor with positional arguments passed as keyword arguments
# and with the `metric` keyword argument renamed to `metrics`.
new_metric_frame_init(self,
**metric_arg_dict,
**positional_dict,
**kwargs)
return compatible_metric_frame_init
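# Illustrative example of what the decorator does (names below are made up):
# a legacy positional call such as
#     MetricFrame(accuracy_score, y_true, y_pred, sensitive_features=sf)
# emits a FutureWarning and is forwarded to the new constructor as
#     MetricFrame(metrics=accuracy_score, y_true=y_true, y_pred=y_pred,
#                 sensitive_features=sf)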
class MetricFrame:
"""Collection of disaggregated metric values.
This data structure stores and manipulates disaggregated values for any number of underlying
metrics. At least one sensitive feature must be supplied, which is used
to split the data into subgroups. The underlying metric(s) is(are) calculated
across the entire dataset (made available by the :attr:`.overall` property) and
for each identified subgroup (made available by the :attr:`.by_group` property).
The only limitations placed on the metric functions are that:
* The first two arguments they take must be ``y_true`` and ``y_pred`` arrays
* Any other arguments must correspond to sample properties (such as sample weights),
meaning that their first dimension is the same as that of y_true and y_pred. These
arguments will be split up along with the ``y_true`` and ``y_pred`` arrays
The interpretation of the ``y_true`` and ``y_pred`` arrays is up to the
underlying metric - it is perfectly possible to pass in lists of class
probability tuples. We also support non-scalar return types for the
metric function (such as confusion matrices) at the current time. However,
the aggregation functions will not be well defined in this case.
Group fairness metrics are obtained by methods that implement
various aggregators over group-level metrics, such as the
maximum, minimum, or the worst-case difference or ratio.
This data structure also supports the concept of 'control features.' Like the sensitive
features, control features identify subgroups within the data, but
aggregations are not performed over the control features. Instead, the
aggregations produce a result for each subgroup identified by the control
feature(s). The name 'control features' refers to the statistical practice
of 'controlling' for a variable.
Parameters
----------
metrics : callable or dict
The underlying metric functions which are to be calculated. This
can either be a single metric function or a dictionary of functions.
These functions must be callable as
``fn(y_true, y_pred, **sample_params)``.
If there are any other arguments required (such as ``beta`` for
:func:`sklearn.metrics.fbeta_score`) then
:func:`functools.partial` must be used.
**Note** that the values returned by various members of the class change
based on whether this argument is a callable or a dictionary of
callables. This distinction remains *even if* the dictionary only
contains a single entry.
y_true : List, pandas.Series, numpy.ndarray, pandas.DataFrame
The ground-truth labels (for classification) or target values (for regression).
y_pred : List, pandas.Series, numpy.ndarray, pandas.DataFrame
The predictions.
sensitive_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame
The sensitive features which should be used to create the subgroups.
At least one sensitive feature must be provided.
All names (whether on pandas objects or dictionary keys) must be strings.
We also forbid DataFrames with column names of ``None``.
For cases where no names are provided we generate names ``sensitive_feature_[n]``.
control_features : List, pandas.Series, dict of 1d arrays, numpy.ndarray, pandas.DataFrame
Control features are similar to sensitive features, in that they
divide the input data into subgroups.
Unlike the sensitive features, aggregations are not performed
across the control features - for example, the ``overall`` property
will have one value for each subgroup in the control feature(s),
rather than a single value for the entire data set.
Control features can be specified similarly to the sensitive features.
However, their default names (if none can be identified in the
input values) are of the format ``control_feature_[n]``.
**Note** the types returned by members of the class vary based on whether
control features are present.
sample_params : dict
Parameters for the metric function(s). If there is only one metric function,
then this is a dictionary of strings and array-like objects, which are split
alongside the ``y_true`` and ``y_pred`` arrays, and passed to the metric function.
If there are multiple metric functions (passed as a dictionary), then this is
a nested dictionary, with the first set of string keys identifying the
metric function name, with the values being the string-to-array-like dictionaries.
metric : callable or dict
The underlying metric functions which are to be calculated. This
can either be a single metric function or a dictionary of functions.
These functions must be callable as
``fn(y_true, y_pred, **sample_params)``.
If there are any other arguments required (such as ``beta`` for
:func:`sklearn.metrics.fbeta_score`) then
:func:`functools.partial` must be used.
.. deprecated:: 0.7.0
`metric` will be removed in version 0.10.0, use `metrics` instead.
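Examples
--------
A minimal usage sketch (illustrative only; the data and metric choices
below are invented and are not taken from the Fairlearn codebase):
>>> from sklearn.metrics import accuracy_score, recall_score
>>> mf = MetricFrame(
...     metrics={'accuracy': accuracy_score, 'recall': recall_score},
...     y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0],
...     sensitive_features=['a', 'a', 'b', 'b'])
>>> overall = mf.overall  # a pandas.Series indexed by metric name
>>> by_group = mf.by_group  # a pandas.DataFrame with one row per subgroup 'a'/'b'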
"""
# The deprecation decorator does two things:
# (1) turns first three positional arguments into keyword arguments
# (2) renames the 'metric' keyword argument into 'metrics'
@_deprecate_metric_frame_init
def __init__(self,
*,
metrics: Union[Callable, Dict[str, Callable]],
y_true,
y_pred,
sensitive_features,
control_features: Optional = None,
sample_params: Optional[Union[Dict[str, Any], Dict[str, Dict[str, Any]]]] = None):
"""Read a placeholder comment."""
check_consistent_length(y_true, y_pred)
y_t = _convert_to_ndarray_and_squeeze(y_true)
y_p = _convert_to_ndarray_and_squeeze(y_pred)
func_dict = self._process_functions(metrics, sample_params)
# Now, prepare the sensitive features
sf_list = self._process_features("sensitive_feature_", sensitive_features, y_t)
self._sf_names = [x.name for x in sf_list]
# Prepare the control features
# Adjust _sf_indices if needed
cf_list = None
self._cf_names = None
if control_features is not None:
cf_list = self._process_features("control_feature_", control_features, y_t)
self._cf_names = [x.name for x in cf_list]
# Check for duplicate feature names
nameset = set()
namelist = self._sf_names
if self._cf_names:
namelist = namelist + self._cf_names
for name in namelist:
if name in nameset:
raise ValueError(_DUPLICATE_FEATURE_NAME.format(name))
nameset.add(name)
self._overall = self._compute_overall(func_dict, y_t, y_p, cf_list)
self._by_group = self._compute_by_group(func_dict, y_t, y_p, sf_list, cf_list)
def _compute_overall(self, func_dict, y_true, y_pred, cf_list):
if cf_list is None:
result = pd.Series(index=func_dict.keys(), dtype='object')
for func_name in func_dict:
metric_value = func_dict[func_name].evaluate_all(y_true, y_pred)
result[func_name] = metric_value
else:
result = self._compute_dataframe_from_rows(func_dict, y_true, y_pred, cf_list)
return result
def _compute_by_group(self, func_dict, y_true, y_pred, sf_list, cf_list):
rows = copy.deepcopy(sf_list)
if cf_list is not None:
# Prepend the conditional features, so they are 'higher'
rows = copy.deepcopy(cf_list) + rows
return self._compute_dataframe_from_rows(func_dict, y_true, y_pred, rows)
def _compute_dataframe_from_rows(self, func_dict, y_true, y_pred, rows):
if len(rows) == 1:
row_index = pd.Index(data=rows[0].classes, name=rows[0].name)
else:
row_index = pd.MultiIndex.from_product([x.classes for x in rows],
names=[x.name for x in rows])
if len(row_index) > _SUBGROUP_COUNT_WARNING_THRESHOLD:
msg = _SUBGROUP_COUNT_WARNING.format(len(row_index))
logger.warning(msg)
result = pd.DataFrame(index=row_index, columns=func_dict.keys())
for func_name in func_dict:
for row_curr in row_index:
mask = None
if len(rows) > 1:
mask = self._mask_from_tuple(row_curr, rows)
else:
# Have to force row_curr to be an unary tuple
mask = self._mask_from_tuple((row_curr,), rows)
# Only call the metric function if the mask is non-empty
if sum(mask) > 0:
curr_metric = func_dict[func_name].evaluate(y_true, y_pred, mask)
result[func_name][row_curr] = curr_metric
return result
@property
def overall(self) -> Union[Any, pd.Series, pd.DataFrame]:
"""Return the underlying metrics evaluated on the whole dataset.
Returns
-------
typing.Any or pandas.Series or pandas.DataFrame
The exact type varies based on whether control features were
provided and how the metric functions were specified.
======== ================ =================================
Metrics Control Features Result Type
======== ================ =================================
Callable None Return type of callable
-------- ---------------- ---------------------------------
Callable Provided Series, indexed by the subgroups
of the conditional feature(s)
-------- ---------------- ---------------------------------
Dict None Series, indexed by the metric
names
-------- ---------------- ---------------------------------
Dict Provided DataFrame. Columns are
metric names, rows are subgroups
of conditional feature(s)
======== ================ =================================
The distinction applies even if the dictionary contains a
single metric function. This is to allow for a consistent
interface when calling programmatically, while also reducing
typing for those using Fairlearn interactively.
"""
if self._user_supplied_callable:
if self.control_levels:
return self._overall.iloc[:, 0]
else:
return self._overall.iloc[0]
else:
return self._overall
@property
def by_group(self) -> Union[pd.Series, pd.DataFrame]:
"""Return the collection of metrics evaluated for each subgroup.
The collection is defined by the combination of classes in the
sensitive and control features. The exact type depends on
the specification of the metric function.
Returns
-------
pandas.Series or pandas.DataFrame
When a callable is supplied to the constructor, the result is
a :class:`pandas.Series`, indexed by the combinations of subgroups
in the sensitive and control features.
When the metric functions were specified with a dictionary (even
if the dictionary only has a single entry), then the result is
a :class:`pandas.DataFrame` with columns named after the metric
functions, and rows indexed by the combinations of subgroups
in the sensitive and control features.
If a particular combination of subgroups was not present in the dataset
(likely to occur as more sensitive and control features
are specified), then the corresponding entry will be NaN.
"""
if self._user_supplied_callable:
return self._by_group.iloc[:, 0]
else:
return self._by_group
@property
def control_levels(self) -> List[str]:
"""Return a list of feature names which are produced by control features.
If control features are present, then the rows of the :attr:`.by_group`
property have a :class:`pandas.MultiIndex` index. This property
identifies which elements of that index are control features.
Returns
-------
List[str] or None
List of names, which can be used in calls to
:meth:`pandas.DataFrame.groupby` etc.
"""
return self._cf_names
@property
def sensitive_levels(self) -> List[str]:
"""Return a list of the feature names which are produced by sensitive features.
In cases where the :attr:`.by_group` property has a :class:`pandas.MultiIndex`
index, this identifies which elements of the index are sensitive features.
Returns
-------
List[str]
List of names, which can be used in calls to
:meth:`pandas.DataFrame.groupby` etc.
"""
return self._sf_names
def group_max(self) -> Union[Any, pd.Series, pd.DataFrame]:
"""Return the maximum value of the metric over the sensitive features.
This method computes the maximum value over all combinations of
sensitive features for each underlying metric function in the :attr:`.by_group`
property (it will only succeed if all the underlying metric
functions return scalar values). The exact return type depends on
whether control features are present, and whether the metric functions
were specified as a single callable or a dictionary.
Returns
-------
typing.Any or pandas.Series or pandas.DataFrame
The maximum value over sensitive features. The exact type
follows the table in :attr:`.MetricFrame.overall`.
"""
if not self.control_levels:
result = pd.Series(index=self._by_group.columns, dtype='object')
for m in result.index:
max_val = self._by_group[m].max()
result[m] = max_val
else:
result = self._by_group.groupby(level=self.control_levels).max()
if self._user_supplied_callable:
if self.control_levels:
return result.iloc[:, 0]
else:
return result.iloc[0]
else:
return result
def group_min(self) -> Union[Any, pd.Series, pd.DataFrame]:
"""Return the minimum value of the metric over the sensitive features.
This method computes the minimum value over all combinations of
sensitive features for each underlying metric function in the :attr:`.by_group`
property (it will only succeed if all the underlying metric
functions return scalar values). The exact return type depends on
whether control features are present, and whether the metric functions
were specified as a single callable or a dictionary.
Returns
-------
typing.Any pandas.Series or pandas.DataFrame
The minimum value over sensitive features. The exact type
follows the table in :attr:`.MetricFrame.overall`.
"""
if not self.control_levels:
result = pd.Series(index=self._by_group.columns, dtype='object')
for m in result.index:
min_val = self._by_group[m].min()
result[m] = min_val
else:
result = self._by_group.groupby(level=self.control_levels).min()
if self._user_supplied_callable:
if self.control_levels:
return result.iloc[:, 0]
else:
return result.iloc[0]
else:
return result
def difference(self,
method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:
"""Return the maximum absolute difference between groups for each metric.
This method calculates a scalar value for each underlying metric by
finding the maximum absolute difference between the entries in each
combination of sensitive features in the :attr:`.by_group` property.
Similar to other methods, the result type varies with the
specification of the metric functions, and whether control features
are present or not.
There are two allowed values for the ``method=`` parameter. The
value ``between_groups`` computes the maximum difference between
any two pairs of groups in the :attr:`.by_group` property (i.e.
``group_max() - group_min()``). Alternatively, ``to_overall``
computes the difference between each subgroup and the
corresponding value from :attr:`.overall` (if there are control
features, then :attr:`.overall` is multivalued for each metric).
The result is the absolute maximum of these values.
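For example, if a metric evaluates to 0.8 on one subgroup and 0.6 on
another, ``between_groups`` yields 0.8 - 0.6 = 0.2 (illustrative figures).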
Parameters
----------
method : str
How to compute the aggregate. Default is :code:`between_groups`
Returns
-------
typing.Any or pandas.Series or pandas.DataFrame
The exact type follows the table in :attr:`.MetricFrame.overall`.
"""
subtrahend = np.nan
if method == 'between_groups':
subtrahend = self.group_min()
elif method == 'to_overall':
subtrahend = self.overall
else:
raise ValueError("Unrecognised method '{0}' in difference() call".format(method))
return (self.by_group - subtrahend).abs().max(level=self.control_levels)
def ratio(self,
method: str = 'between_groups') -> Union[Any, pd.Series, pd.DataFrame]:
"""Return the minimum ratio between groups for each metric.
This method calculates a scalar value for each underlying metric by
finding the minimum ratio (that is, the ratio is forced to be
less than unity) between the entries in each
column of the :attr:`.by_group` property.
Similar to other methods, the result type varies with the
specification of the metric functions, and whether control features
are present or not.
There are two allowed values for the ``method=`` parameter. The
value ``between_groups`` computes the minimum ratio between
any two pairs of groups in the :attr:`.by_group` property (i.e.
``group_min() / group_max()``). Alternatively, ``to_overall``
computes the ratio between each subgroup and the
corresponding value from :attr:`.overall` (if there are control
features, then :attr:`.overall` is multivalued for each metric),
expressing the ratio as a number less than 1.
The result is the minimum of these values.
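For example, subgroup values of 0.6 and 0.8 give a ``between_groups``
ratio of 0.6 / 0.8 = 0.75 (illustrative figures).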
Parameters
----------
method : str
How to compute the aggregate. Default is :code:`between_groups`
Returns
-------
typing.Any or pandas.Series or pandas.DataFrame
The exact type follows the table in :attr:`.MetricFrame.overall`.
"""
result = None
if method == 'between_groups':
result = self.group_min() / self.group_max()
elif method == 'to_overall':
if self._user_supplied_callable:
tmp = self.by_group / self.overall
result = tmp.transform(lambda x: min(x, 1/x)).min(level=self.control_levels)
else:
ratios = None
if self.control_levels:
# It's easiest to give in to the DataFrame columns preference
ratios = self.by_group.unstack(level=self.control_levels) / \
self.overall.unstack(level=self.control_levels)
else:
ratios = self.by_group / self.overall
def ratio_sub_one(x):
if x > 1:
return 1/x
else:
return x
ratios = ratios.apply(lambda x: x.transform(ratio_sub_one))
if not self.control_levels:
result = ratios.min()
else:
result = ratios.min().unstack(0)
else:
raise ValueError("Unrecognised method '{0}' in ratio() call".format(method))
return result
def _process_functions(self, metric, sample_params) -> Dict[str, FunctionContainer]:
"""Get the underlying metrics into :class:`fairlearn.metrics.FunctionContainer` objects."""
self._user_supplied_callable = True
func_dict = dict()
if isinstance(metric, dict):
self._user_supplied_callable = False
s_p = dict()
if sample_params is not None:
if not isinstance(sample_params, dict):
raise ValueError(_SAMPLE_PARAMS_NOT_DICT)
sp_keys = set(sample_params.keys())
mf_keys = set(metric.keys())
if not sp_keys.issubset(mf_keys):
raise ValueError(_SAMPLE_PARAM_KEYS_NOT_IN_FUNC_DICT)
s_p = sample_params
for name, func in metric.items():
curr_s_p = None
if name in s_p:
curr_s_p = s_p[name]
fc = FunctionContainer(func, name, curr_s_p)
func_dict[fc.name_] = fc
else:
fc = FunctionContainer(metric, None, sample_params)
func_dict[fc.name_] = fc
return func_dict
def _process_features(self, base_name, features, sample_array) -> List[GroupFeature]:
"""Extract the features into :class:`fairlearn.metrics.GroupFeature` objects."""
result = []
if isinstance(features, pd.Series):
check_consistent_length(features, sample_array)
result.append(GroupFeature(base_name, features, 0, None))
elif isinstance(features, pd.DataFrame):
for i in range(len(features.columns)):
col_name = features.columns[i]
if not isinstance(col_name, str):
msg = _FEATURE_DF_COLUMN_BAD_NAME.format(col_name, type(col_name))
raise ValueError(msg)
column = features.iloc[:, i]
check_consistent_length(column, sample_array)
result.append(GroupFeature(base_name, column, i, None))
elif isinstance(features, list):
if np.isscalar(features[0]):
f_arr = np.atleast_1d(np.squeeze(np.asarray(features)))
assert len(f_arr.shape) == 1 # Sanity check
check_consistent_length(f_arr, sample_array)
result.append(GroupFeature(base_name, f_arr, 0, None))
else:
raise ValueError(_FEATURE_LIST_NONSCALAR)
elif isinstance(features, dict):
try:
df = pd.DataFrame.from_dict(features)
import unittest
import pandas as pd
from pandas.util.testing import assert_series_equal
import numpy as np
from easyframes.easyframes import hhkit
class TestStataMerge(unittest.TestCase):
def setUp(self):
"""
df_original = pd.read_csv('sample_hh_dataset.csv')
df = df_original.copy()
print(df.to_dict())
"""
self.df_master = pd.DataFrame(
{'educ': {0: 'secondary', 1: 'bachelor', 2: 'primary', 3: 'higher', 4: 'bachelor', 5: 'secondary',
6: 'higher', 7: 'higher', 8: 'primary', 9: 'primary'},
'hh': {0: 1, 1: 1, 2: 1, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4, 8: 4, 9: 4},
'id': {0: 1, 1: 2, 2: 3, 3: 1, 4: 1, 5: 2, 6: 1, 7: 2, 8: 3, 9: 4},
'has_car': {0: 1, 1: 1, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1, 9: 1},
'weighthh': {0: 2, 1: 2, 2: 2, 3: 3, 4: 2, 5: 2, 6: 3, 7: 3, 8: 3, 9: 3},
'house_rooms': {0: 3, 1: 3, 2: 3, 3: 2, 4: 1, 5: 1, 6: 3, 7: 3, 8: 3, 9: 3},
'prov': {0: 'BC', 1: 'BC', 2: 'BC', 3: 'Alberta', 4: 'BC', 5: 'BC', 6: 'Alberta',
7: 'Alberta', 8: 'Alberta', 9: 'Alberta'},
'age': {0: 44, 1: 43, 2: 13, 3: 70, 4: 23, 5: 20, 6: 37, 7: 35, 8: 8, 9: 15},
'fridge': {0: 'yes', 1: 'yes', 2: 'yes', 3: 'no', 4: 'yes', 5: 'yes', 6: 'no',
7: 'no', 8: 'no', 9: 'no'},
'male': {0: 1, 1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 0, 9: 0}})
self.df_using_hh = pd.DataFrame(
{'hh': {0: 2, 1: 4, 2: 5, 3: 6, 4: 7},
'has_fence': {0: 1, 1: 0, 2: 1, 3: 1, 4: 0}
})
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import time
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer, LabelEncoder
from sklearn.metrics import confusion_matrix
import itertools
from keras.utils import plot_model
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras import optimizers
from keras.layers import Conv1D, MaxPooling1D, Embedding, LSTM, SpatialDropout1D
from keras.layers import Activation, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
from keras.preprocessing import text, sequence
from keras import utils
from keras import regularizers
import nltk
from nltk.corpus import stopwords
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
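# Example call (illustrative; assumes true labels and model predictions are available):
#   cm = confusion_matrix(y_test, predictions)
#   plot_confusion_matrix(cm, classes=['FAKE', 'REAL'], normalize=True)
#   plt.show()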
def load_more_data():
midpoint = 13000
size = 7500
raw_data = pd.read_csv("data/combined.csv")
# raw_data = pd.read_csv("data/fake_or_real_news.csv")
# all_data = [cleanText(str(val)) for val in raw_data['text'].values]
all_data = [cleanText(str(val)) for val in raw_data['text'][int(midpoint-size/2):int(midpoint+size/2)].values]
all_labels = raw_data['real'][int(midpoint-size/2):int(midpoint+size/2)].values
# all_labels = [0 if val == "FAKE" else 1 for val in raw_data['label'].values]
all_data = np.array(all_data)
all_labels = np.array(all_labels)
x = pd.Series(all_data)
y = pd.Series(all_labels)
data = {}
data['text'] = np.array(list(x))
data['label'] = np.array(list(y))
# return all_data, all_labels
return pd.DataFrame.from_dict(data)
def load_data():
midpoint = 13000
size = 7000
# raw_data = pd.read_csv("data/combined.csv")
raw_data = pd.read_csv("data/fake_or_real_news.csv")
all_data = [cleanText(str(val)) for val in raw_data['text'].values]
# all_data = [cleanText(str(val)) for val in raw_data['text'][int(midpoint-size/2):int(midpoint+size/2)].values]
all_labels = [0 if val == "FAKE" else 1 for val in raw_data['label'].values]
# all_labels = raw_data['real'][int(midpoint-size/2):int(midpoint+size/2)].values
# more_x, more_y = load_more_data()
all_data = np.array(all_data)
all_labels = np.array(all_labels)
# all_data = np.concatenate((np.array(all_data), more_x), axis=0)
# all_labels = np.concatenate((np.array(all_labels), more_y), axis=0)
x_train, x_test, y_train, y_test = train_test_split(all_data, all_labels, test_size=0.2, random_state=42)
x_train = pd.Series(x_train)
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import logging
import numpy as np
import pandas as pd
class TerrplantFunctions(object):
"""
Function class for Stir.
"""
def __init__(self):
"""Class representing the functions for Sip"""
super(TerrplantFunctions, self).__init__()
def run_dry(self):
"""
EEC for runoff for dry areas
"""
self.out_run_dry = (self.application_rate / self.incorporation_depth) * self.runoff_fraction
return self.out_run_dry
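# Illustrative arithmetic (values are made up): an application rate of 1.0,
# an incorporation depth of 1.0 and a runoff fraction of 0.01 give
# out_run_dry = (1.0 / 1.0) * 0.01 = 0.01.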
def run_semi(self):
"""
EEC for runoff to semi-aquatic areas
"""
self.out_run_semi = (self.application_rate / self.incorporation_depth) * self.runoff_fraction * 10
return self.out_run_semi
def spray(self):
"""
EEC for spray drift
"""
self.out_spray = self.application_rate * self.drift_fraction
return self.out_spray
def total_dry(self):
"""
EEC total for dry areas
"""
self.out_total_dry = self.out_run_dry + self.out_spray
return self.out_total_dry
def total_semi(self):
"""
EEC total for semi-aquatic areas
"""
self.out_total_semi = self.out_run_semi + self.out_spray
return self.out_total_semi
def nms_rq_dry(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_nms_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_dry
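# Illustrative arithmetic (values are made up): a total dry-area EEC of 0.06 and an
# EC25 of 0.5 give an RQ of 0.12, below the level of concern of 1.0 checked in
# loc_nms_dry below.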
def loc_nms_dry(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a dry area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_dry]
self.out_nms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
# exceed_boolean = self.out_nms_rq_dry >= 1.0
# self.out_nms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nms_loc_dry
def nms_rq_semi(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_nms_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_monocot
return self.out_nms_rq_semi
def loc_nms_semi(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide X in a semi-aquatic area
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_semi]
self.out_nms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_semi >= 1.0
#self.out_nms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nms_loc_semi
def nms_rq_spray(self):
"""
Risk Quotient for NON-LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nms_rq_spray = self.out_spray / self.out_min_nms_spray
return self.out_nms_rq_spray
def loc_nms_spray(self):
"""
Level of concern for non-listed monocot seedlings exposed to pesticide via spray drift
"""
msg_pass = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nms_rq_spray]
self.out_nms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nms_rq_spray >= 1.0
#self.out_nms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nms_loc_spray
def lms_rq_dry(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a DRY area
"""
self.out_lms_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_dry
def loc_lms_dry(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide via runoff in a dry area
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_dry]
self.out_lms_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_dry >= 1.0
#self.out_lms_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lms_loc_dry
def lms_rq_semi(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X in a SEMI-AQUATIC area
"""
self.out_lms_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_monocot
return self.out_lms_rq_semi
def loc_lms_semi(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_semi]
self.out_lms_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_semi >= 1.0
#self.out_lms_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lms_loc_semi
def lms_rq_spray(self):
"""
Risk Quotient for LISTED MONOCOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lms_rq_spray = self.out_spray / self.out_min_lms_spray
return self.out_lms_rq_spray
def loc_lms_spray(self):
"""
Level of concern for listed monocot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lms_rq_spray]
self.out_lms_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lms_rq_spray >= 1.0
#self.out_lms_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed monocot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lms_loc_spray
def nds_rq_dry(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_nds_rq_dry = self.out_total_dry / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_dry
def loc_nds_dry(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_dry]
self.out_nds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_dry >= 1.0
#self.out_nds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_nds_loc_dry
def nds_rq_semi(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_nds_rq_semi = self.out_total_semi / self.ec25_nonlisted_seedling_emergence_dicot
return self.out_nds_rq_semi
def loc_nds_semi(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_semi]
self.out_nds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_semi >= 1.0
#self.out_nds_loc_semi = exceed_boolean.map(lambda x:
#'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_nds_loc_semi
def nds_rq_spray(self):
"""
Risk Quotient for NON-LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_nds_rq_spray = self.out_spray / self.out_min_nds_spray
return self.out_nds_rq_spray
def loc_nds_spray(self):
"""
Level of concern for non-listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_nds_rq_spray]
self.out_nds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_nds_rq_spray >= 1.0
#self.out_nds_loc_spray = exceed_boolean.map(lambda x:
# 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for non-listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_nds_loc_spray
def lds_rq_dry(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in DRY areas
"""
self.out_lds_rq_dry = self.out_total_dry / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_dry
def loc_lds_dry(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in dry areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_dry]
self.out_lds_loc_dry = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_dry >= 1.0
#self.out_lds_loc_dry = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to dry areas indicates that potential risk is minimal.')
return self.out_lds_loc_dry
def lds_rq_semi(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X in SEMI-AQUATIC areas
"""
self.out_lds_rq_semi = self.out_total_semi / self.noaec_listed_seedling_emergence_dicot
return self.out_lds_rq_semi
def loc_lds_semi(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X in semi-aquatic areas
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_semi]
self.out_lds_loc_semi = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_semi >= 1.0
#self.out_lds_loc_semi = exceed_boolean.map(lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via runoff to semi-aquatic areas indicates that potential risk is minimal.')
return self.out_lds_loc_semi
def lds_rq_spray(self):
"""
Risk Quotient for LISTED DICOT seedlings exposed to Pesticide X via SPRAY drift
"""
self.out_lds_rq_spray = self.out_spray / self.out_min_lds_spray
return self.out_lds_rq_spray
def loc_lds_spray(self):
"""
Level of concern for listed dicot seedlings exposed to pesticide X via spray drift
"""
msg_pass = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk."
msg_fail = "The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal."
boo_ratios = [ratio >= 1.0 for ratio in self.out_lds_rq_spray]
self.out_lds_loc_spray = pd.Series([msg_pass if boo else msg_fail for boo in boo_ratios])
#exceed_boolean = self.out_lds_rq_spray >= 1.0
#self.out_lds_loc_spray = exceed_boolean.map(
# lambda x:
# 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates a potential risk.' if x == True
# else 'The risk quotient for listed dicot seedlings exposed to the pesticide via spray drift indicates that potential risk is minimal.')
return self.out_lds_loc_spray
def min_nms_spray(self):
"""
Determine the minimum toxicity concentration used for spray drift RQ values:
the smaller of the non-listed monocot EC25 values (seedling emergence vs. vegetative vigor).
"""
s1 = pd.Series(self.ec25_nonlisted_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.ec25_nonlisted_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_nms_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_nms_spray
def min_lms_spray(self):
"""
Determine the minimum toxicity concentration used for spray drift RQ values:
the smaller of the listed monocot NOAEC values (seedling emergence vs. vegetative vigor).
"""
s1 = pd.Series(self.noaec_listed_seedling_emergence_monocot, name='seedling')
s2 = pd.Series(self.noaec_listed_vegetative_vigor_monocot, name='vegetative')
df = pd.concat([s1, s2], axis=1)
self.out_min_lms_spray = pd.DataFrame.min(df, axis=1)
return self.out_min_lms_spray
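# Hedged, standalone sketch (not part of the model): the two min_* helpers above take an
# element-wise minimum across two toxicity columns; `df.min(axis=1)` is the idiomatic form of
# `pd.DataFrame.min(df, axis=1)`. The values below are made up for illustration, and this
# assumes pandas is already imported as `pd` at module scope, as in the methods above.
if __name__ == "__main__":
    _seedling = pd.Series([0.5, 2.0, 1.5], name='seedling')
    _vegetative = pd.Series([1.0, 1.0, 3.0], name='vegetative')
    # row-wise minimum of the two series -> 0.5, 1.0, 1.5
    print(pd.concat([_seedling, _vegetative], axis=1).min(axis=1))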
# %% [markdown]
# ##
import os
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
from joblib import Parallel, delayed
from sklearn.exceptions import ConvergenceWarning
from sklearn.manifold import MDS, TSNE, Isomap
from sklearn.metrics import pairwise_distances
from sklearn.neighbors import NearestNeighbors
from sklearn.utils.testing import ignore_warnings
from tqdm.autonotebook import tqdm
from umap import UMAP
from graspy.embed import (
AdjacencySpectralEmbed,
ClassicalMDS,
LaplacianSpectralEmbed,
OmnibusEmbed,
select_dimension,
selectSVD,
)
from graspy.plot import pairplot
from graspy.simulations import sbm
from graspy.utils import (
augment_diagonal,
binarize,
pass_to_ranks,
symmetrize,
to_laplace,
)
from src.align import Procrustes
from src.cluster import MaggotCluster, get_paired_inds
from src.data import load_metagraph
from src.graph import preprocess
from src.hierarchy import signal_flow
from src.io import savecsv, savefig
from src.visualization import (
CLASS_COLOR_DICT,
add_connections,
adjplot,
barplot_text,
draw_networkx_nice,
gridmap,
matrixplot,
palplot,
screeplot,
set_axes_equal,
stacked_barplot,
)
warnings.filterwarnings(action="ignore", category=ConvergenceWarning)
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
rc_dict = {
"axes.spines.right": False,
"axes.spines.top": False,
"axes.formatter.limits": (-3, 3),
"figure.figsize": (6, 3),
"figure.dpi": 100,
}
for key, val in rc_dict.items():
mpl.rcParams[key] = val
context = sns.plotting_context(context="talk", font_scale=1, rc=rc_dict)
sns.set_context(context)
np.random.seed(8888)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
def stashcsv(df, name, **kws):
savecsv(df, name, foldername=FNAME, **kws)
graph_type = "G"
def plot_pairs(
X, labels, model=None, left_pair_inds=None, right_pair_inds=None, equal=False
):
n_dims = X.shape[1]
fig, axs = plt.subplots(
n_dims, n_dims, sharex=False, sharey=False, figsize=(20, 20)
)
data = pd.DataFrame(data=X)
data["label"] = labels
for i in range(n_dims):
for j in range(n_dims):
ax = axs[i, j]
ax.axis("off")
if i < j:
sns.scatterplot(
data=data,
x=j,
y=i,
ax=ax,
alpha=0.7,
linewidth=0,
s=8,
legend=False,
hue="label",
palette=CLASS_COLOR_DICT,
)
if left_pair_inds is not None and right_pair_inds is not None:
add_connections(
data.iloc[left_pair_inds, j],
data.iloc[right_pair_inds, j],
data.iloc[left_pair_inds, i],
data.iloc[right_pair_inds, i],
ax=ax,
)
plt.tight_layout()
return fig, axs
def preprocess_adjs(adjs, method="ase"):
adjs = [pass_to_ranks(a) for a in adjs]
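# add a small constant (1 / number of entries) to every edge weight, presumably to avoid
# exactly-zero entries before the spectral embedding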
adjs = [a + 1 / a.size for a in adjs]
if method == "ase":
adjs = [augment_diagonal(a) for a in adjs]
elif method == "lse":
adjs = [to_laplace(a) for a in adjs]
return adjs
def omni(
adjs,
n_components=4,
remove_first=None,
concat_graphs=True,
concat_directed=True,
method="ase",
):
adjs = preprocess_adjs(adjs, method=method)
omni = OmnibusEmbed(n_components=n_components, check_lcc=False, n_iter=10)
embed = omni.fit_transform(adjs)
if concat_directed:
embed = np.concatenate(
embed, axis=-1
)  # concatenate the out- and in- latent positions for directed graphs
if remove_first is not None:
embed = embed[remove_first:]
if concat_graphs:
embed = np.concatenate(embed, axis=0)
return embed
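# Hedged usage sketch (not from the original analysis; shapes and settings are illustrative
# only, so it is left commented out to keep the script's behavior unchanged):
#     toy_adjs = [np.random.rand(20, 20) for _ in range(2)]
#     toy_embed = omni(toy_adjs, n_components=2)
#     # toy_embed.shape == (2 * 20, 2 * 2): graphs stacked on rows, out/in halves on columns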
def ipsi_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [ll_adj, rr_adj]
if co_adj is not None:
co_ll_adj = co_adj[np.ix_(lp_inds, lp_inds)]
co_rr_adj = co_adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs += [co_ll_adj, co_rr_adj]
out_ipsi, in_ipsi = omni(
ipsi_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
left_embed = np.concatenate((out_ipsi[0], in_ipsi[0]), axis=1)
right_embed = np.concatenate((out_ipsi[1], in_ipsi[1]), axis=1)
ipsi_embed = np.concatenate((left_embed, right_embed), axis=0)
return ipsi_embed
def contra_omni(adj, lp_inds, rp_inds, co_adj=None, n_components=4, method="ase"):
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [lr_adj, rl_adj]
if co_adj is not None:
co_lr_adj = co_adj[np.ix_(lp_inds, rp_inds)]
co_rl_adj = co_adj[np.ix_(rp_inds, lp_inds)]
contra_adjs += [co_lr_adj, co_rl_adj]
out_contra, in_contra = omni(
contra_adjs,
n_components=n_components,
concat_directed=False,
concat_graphs=False,
method=method,
)
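# pair each left node's outgoing latent positions (rows of the L->R block) with its incoming
# positions (columns of the R->L block), and vice versa for the right nodes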
left_embed = np.concatenate((out_contra[0], in_contra[1]), axis=1)
right_embed = np.concatenate((out_contra[1], in_contra[0]), axis=1)
contra_embed = np.concatenate((left_embed, right_embed), axis=0)
return contra_embed
def lateral_omni(adj, lp_inds, rp_inds, n_components=4, method="ase"):
ipsi_embed = ipsi_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
contra_embed = contra_omni(
adj, lp_inds, rp_inds, n_components=n_components, method=method
)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def multi_lateral_omni(adjs, lp_inds, rp_inds, n_components=4):
ipsi_adjs = []
for a in adjs:
ll_adj = a[np.ix_(lp_inds, lp_inds)]
rr_adj = a[np.ix_(rp_inds, rp_inds)]
ipsi_adjs.append(ll_adj)
ipsi_adjs.append(rr_adj)
ipsi_embed = omni(ipsi_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(ipsi_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
ipsi_embed = np.concatenate((left, right), axis=0)
contra_adjs = []
for a in adjs:
lr_adj = a[np.ix_(lp_inds, rp_inds)]
rl_adj = a[np.ix_(rp_inds, lp_inds)]
contra_adjs.append(lr_adj)
contra_adjs.append(rl_adj)
contra_embed = omni(contra_adjs, concat_graphs=False, n_components=n_components)
left = []
right = []
for i, e in enumerate(contra_embed):
if i % 2 == 0:
left.append(e)
else:
right.append(e)
left = np.concatenate(left, axis=1)
right = np.concatenate(right, axis=1)
contra_embed = np.concatenate((left, right), axis=0)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def reg_lateral_omni(adj, base_adj, lp_inds, rp_inds, n_components=4):
base_ll_adj = base_adj[np.ix_(lp_inds, lp_inds)]
base_rr_adj = base_adj[np.ix_(rp_inds, rp_inds)]
ll_adj = adj[np.ix_(lp_inds, lp_inds)]
rr_adj = adj[np.ix_(rp_inds, rp_inds)]
ipsi_adjs = [base_ll_adj, base_rr_adj, ll_adj, rr_adj]
ipsi_embed = omni(ipsi_adjs, remove_first=2, n_components=n_components)
base_lr_adj = base_adj[np.ix_(lp_inds, rp_inds)]
base_rl_adj = base_adj[np.ix_(rp_inds, lp_inds)]
lr_adj = adj[np.ix_(lp_inds, rp_inds)]
rl_adj = adj[np.ix_(rp_inds, lp_inds)]
contra_adjs = [base_lr_adj, base_rl_adj, lr_adj, rl_adj]
contra_embed = omni(contra_adjs, remove_first=2, n_components=n_components)
embed = np.concatenate((ipsi_embed, contra_embed), axis=1)
return embed
def quick_embed_viewer(
embed, labels=None, lp_inds=None, rp_inds=None, left_right_indexing=False
):
if left_right_indexing:
lp_inds = np.arange(len(embed) // 2)
rp_inds = np.arange(len(embed) // 2) + len(embed) // 2
fig, axs = plt.subplots(3, 2, figsize=(20, 30))
cmds = ClassicalMDS(n_components=2)
cmds_euc = cmds.fit_transform(embed)
plot_df = pd.DataFrame(data=cmds_euc)
plot_df["labels"] = labels
plot_kws = dict(
x=0,
y=1,
hue="labels",
palette=CLASS_COLOR_DICT,
legend=False,
s=20,
linewidth=0.5,
alpha=0.7,
)
ax = axs[0, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o euclidean")
cmds = ClassicalMDS(n_components=2, dissimilarity="precomputed")
pdist = symmetrize(pairwise_distances(embed, metric="cosine"))
cmds_cos = cmds.fit_transform(pdist)
plot_df[0] = cmds_cos[:, 0]
plot_df[1] = cmds_cos[:, 1]
ax = axs[0, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("CMDS o cosine")
tsne = TSNE(metric="euclidean")
tsne_euc = tsne.fit_transform(embed)
plot_df[0] = tsne_euc[:, 0]
plot_df[1] = tsne_euc[:, 1]
ax = axs[1, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o euclidean")
tsne = TSNE(metric="precomputed")
tsne_cos = tsne.fit_transform(pdist)
plot_df[0] = tsne_cos[:, 0]
plot_df[1] = tsne_cos[:, 1]
ax = axs[1, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("TSNE o cosine")
umap = UMAP(metric="euclidean", n_neighbors=30, min_dist=1)
umap_euc = umap.fit_transform(embed)
plot_df[0] = umap_euc[:, 0]
plot_df[1] = umap_euc[:, 1]
ax = axs[2, 0]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o euclidean")
umap = UMAP(metric="cosine", n_neighbors=30, min_dist=1)
umap_cos = umap.fit_transform(embed)
plot_df[0] = umap_cos[:, 0]
plot_df[1] = umap_cos[:, 1]
ax = axs[2, 1]
sns.scatterplot(data=plot_df, ax=ax, **plot_kws)
ax.axis("off")
add_connections(
plot_df.iloc[lp_inds, 0],
plot_df.iloc[rp_inds, 0],
plot_df.iloc[lp_inds, 1],
plot_df.iloc[rp_inds, 1],
ax=ax,
)
ax.set_title("UMAP o cosine")
def umapper(embed, metric="euclidean", n_neighbors=30, min_dist=1, **kws):
umap = UMAP(metric=metric, n_neighbors=n_neighbors, min_dist=min_dist)
umap_euc = umap.fit_transform(embed)
plot_df = pd.DataFrame(data=umap_euc)
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 11:01:20 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import string
import scipy.stats as stats
'''
relax a little bit
test a quiz
'''
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj1 = pd.Series(sdata)
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj2 = pd.Series(sdata, index = states)
obj3 = pd.isnull(obj2)
x=obj2['California']
#print(obj2['California']!=x)
#print(obj2['California']==None)
#print(math.isnan(obj2['California']))
#print(pd.isna(obj2['California']))
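# note: x is NaN here ('California' has no entry in sdata), and NaN != NaN, so the
# equality-based checks above give surprising results; pd.isna / math.isnan are the
# reliable missing-value tests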
d = {
'1': 'Alice',
'2': 'Bob',
'3': 'Rita',
'4': 'Molly',
'5': 'Ryan'
}
S = pd.Series(d)
data = pd.DataFrame(np.arange(16).reshape((4, 4)),
index=['Ohio', 'Colorado', 'Utah', 'New York'],
columns=['one', 'two', 'three', 'four'])
data1 = data.rename(mapper=lambda x:x.upper(), axis=1)
#data2 = data.rename(mapper=lambda x:x.upper(), axis=1, inplace=True)
#data3 = data.rename(mapper=lambda x:x.upper(), axis='column')
data = {'gre score':[337, 324, 316, 322, 314],
'toefl score':[118, 107, 104, 110, 103]}
df = pd.DataFrame(data, columns=['gre score', 'toefl score'],
index=np.arange(1, 6, 1))
df1 = df.where(df['toefl score']>105).dropna()
df2 = df[df['toefl score']>105]
df3 = df.where(df['toefl score']>105)
#s1 = pd.Series({1: 'Alice', 2: 'Jack', 3: 'Molly'})
#s2 = pd.Series({'Alice': 1, 'Jack': 2, 'Molly': 3})
df1 = df[df['toefl score'].gt(105)&df['toefl score'].lt(115)]
df2 = df[(df['toefl score'].isin(range(106,115)))]
df3 = (df['toefl score']>105)&(df['toefl score']<115)
data = {'Name':['Alice', 'Jack'],
'Age':[20, 22],
'Gender':['F', 'M']}
index1 = ['Mathematics', 'Sociology']
df = pd.DataFrame(data, index=index1)
seri = df.T['Mathematics']
for col in df.columns:
series = df[col]
# print(series)
'''
pandas documentation (Chinese version)
'''
s = pd.Series([1, 3, 5, np.nan, 6, 8])
dates = pd.date_range('20130111', periods=6)
df = pd.DataFrame(np.random.randn(6, 4), index=dates, columns=list('ABCD'))
df2 = pd.DataFrame({'A': 1.,
'B': pd.Timestamp('20130102')
import pandas as pd
import pytest
from eli5 import explain_prediction_df
from sklearn.datasets import load_boston
from pandas.testing import assert_series_equal
import ttrees
import build_model
def test_prediction_decomposition_eqal_eli5():
"""Test that the prediction decomposition outputs from xgb.explainer.decompose_prediction are eqaul to the outputs from eli5."""
model = build_model.build_depth_3_model()
boston = load_boston()
boston_data = | pd.DataFrame(boston["data"], columns=boston["feature_names"]) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # Benchmark Results
# This notebook visualizes the output from the different models on different classification problems
# In[1]:
import collections
import glob
import json
import os
import numpy as np
import pandas as pd
from plotnine import *
from saged.utils import split_sample_names, create_dataset_stat_df, get_dataset_stats, parse_map_file
# ## Set Up Functions and Get Metadata
# In[3]:
def return_unlabeled():
# For use in a defaultdict
return 'unlabeled'
# In[4]:
data_dir = '../../data/'
map_file = os.path.join(data_dir, 'sample_classifications.pkl')
sample_to_label = parse_map_file(map_file)
sample_to_label = collections.defaultdict(return_unlabeled, sample_to_label)
# In[ ]:
metadata_path = os.path.join(data_dir, 'aggregated_metadata.json')
metadata = None
with open(metadata_path) as json_file:
metadata = json.load(json_file)
sample_metadata = metadata['samples']
# In[ ]:
experiments = metadata['experiments']
sample_to_study = {}
for study in experiments:
for accession in experiments[study]['sample_accession_codes']:
sample_to_study[accession] = study
# ## Sepsis classification
# In[8]:
in_files = glob.glob('../../results/single_label.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics
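# The per-dataset cells below repeat this parse-the-filename-and-concatenate loop. A helper
# along these lines could replace it (hedged sketch: the function name and the filename
# conventions it assumes are not part of the original notebook; it also trims the literal
# '.tsv' suffix rather than using str.strip, which strips a character set):
def collect_metrics(paths, split_token):
    """Concatenate benchmark TSVs, tagging rows with model info parsed from each filename."""
    frames = []
    for path in paths:
        df = pd.read_csv(path, sep='\t')
        stem = path[:-len('.tsv')] if path.endswith('.tsv') else path
        model_info = stem.split(split_token)[-1].split('.')
        if len(model_info) == 4:
            df['unsupervised'], df['supervised'] = model_info[0], model_info[1]
        else:
            df['unsupervised'], df['supervised'] = 'untransformed', model_info[0]
        frames.append(df)
    return pd.concat(frames)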
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# In[11]:
plot = ggplot(sepsis_metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=3)
plot += ggtitle('PCA vs untransformed data for classifying sepsis')
print(plot)
# ## All labels
# In[12]:
in_files = glob.glob('../../results/all_labels.*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[13]:
metrics = None
for path in in_files:
if metrics is None:
metrics = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
metrics['unsupervised'] = unsupervised_model
metrics['supervised'] = supervised_model
else:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all_labels.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
metrics = pd.concat([metrics, new_df])
metrics
# In[14]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# In[15]:
plot = ggplot(metrics, aes(x='supervised', y='accuracy', fill='unsupervised'))
plot += geom_jitter(size=2)
plot += ggtitle('PCA vs untransformed data for all label classification')
print(plot)
# # Subsets of healthy labels
# In[16]:
in_files = glob.glob('../../results/subset_label.sepsis*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[17]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[18]:
print(sepsis_metrics[sepsis_metrics['healthy_used'] == 1])
# In[19]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[20]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[21]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Same analysis, but with tb instead of sepsis
# In[22]:
in_files = glob.glob('../../results/subset_label.tb*')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[23]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
if len(model_info) == 4:
unsupervised_model = model_info[0]
supervised_model = model_info[1]
else:
unsupervised_model = 'untransformed'
supervised_model = model_info[0]
new_df['unsupervised'] = unsupervised_model
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[24]:
print(tuberculosis_metrics[tuberculosis_metrics['healthy_used'] == 1])
# In[25]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[26]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='unsupervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[27]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Supervised Results Only
# The results above show that unsupervised learning mostly hurts performance rather than helping.
# The visualizations below compare each model based only on its supervised results.
# In[28]:
supervised_sepsis = sepsis_metrics[sepsis_metrics['unsupervised'] == 'untransformed']
# In[29]:
plot = ggplot(supervised_sepsis, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[30]:
supervised_tb = tuberculosis_metrics[tuberculosis_metrics['unsupervised'] == 'untransformed']
# In[31]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[32]:
plot = ggplot(supervised_tb, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Batch Effect Correction
# In[33]:
in_files = glob.glob('../../results/subset_label.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[34]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
print(model_info)
model_info = model_info.split('.')
print(model_info)
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
sepsis_metrics
# In[35]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', ))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[36]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## TB Batch effect corrected
# In[37]:
in_files = glob.glob('../../results/subset_label.tb*be_corrected.tsv')
print(in_files[:5])
# In[38]:
tuberculosis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
tuberculosis_metrics = pd.concat([tuberculosis_metrics, new_df])
tuberculosis_metrics = tuberculosis_metrics.rename({'fraction of healthy used': 'healthy_used'}, axis='columns')
tuberculosis_metrics['healthy_used'] = tuberculosis_metrics['healthy_used'].round(1)
tuberculosis_metrics
# In[39]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[40]:
plot = ggplot(tuberculosis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# ## Better Metrics, Same Label Distribution in Train and Val sets
# In[11]:
in_files = glob.glob('../../results/keep_ratios.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[12]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics = sepsis_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
sepsis_metrics['healthy_used'] = sepsis_metrics['healthy_used'].round(1)
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[13]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[14]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[15]:
plot = ggplot(sepsis_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[16]:
sepsis_stat_df = create_dataset_stat_df(sepsis_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'sepsis')
sepsis_stat_df.tail(5)
# In[17]:
ggplot(sepsis_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[18]:
plot = ggplot(sepsis_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Effect of All Sepsis Data')
plot
# ## Same Distribution Tuberculosis
# In[19]:
in_files = glob.glob('../../results/keep_ratios.tb*be_corrected.tsv')
print(in_files[:5])
# In[20]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics = tb_metrics.rename({'fraction of data used': 'healthy_used'}, axis='columns')
tb_metrics['healthy_used'] = tb_metrics['healthy_used'].round(1)
tb_metrics
# In[21]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy'))
plot += geom_boxplot()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[22]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[23]:
plot = ggplot(tb_metrics, aes(x='factor(healthy_used)', y='balanced_accuracy', fill='supervised'))
plot += geom_violin()
plot += ggtitle('Effect of subsetting healthy data on prediction accuracy')
print(plot)
# In[24]:
tb_stat_df = create_dataset_stat_df(tb_metrics,
sample_to_study,
sample_metadata,
sample_to_label,
'tb')
tb_stat_df.tail(5)
# In[55]:
ggplot(tb_stat_df, aes(x='train_val_diff',
y='balanced_accuracy',
color='val_disease_count')) + geom_point() + facet_grid('model ~ .')
# In[25]:
plot = ggplot(tb_metrics, aes(x='train sample count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot
# ## Results from Small Datasets
# In[57]:
in_files = glob.glob('../../results/small_subsets.sepsis*be_corrected.tsv')
print(in_files[:5])
# In[58]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[59]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[60]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[61]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## Small Training Set TB
# In[62]:
in_files = glob.glob('../../results/small_subsets.tb*be_corrected.tsv')
print(in_files[:5])
# In[63]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[64]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[65]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size vs Models (equal label counts)')
print(plot)
# In[66]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth(method='loess')
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB (lack of a) Crossover Point')
plot
# ## Small training sets without be correction
# In[67]:
in_files = glob.glob('../../results/small_subsets.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[68]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
sepsis_metrics = sepsis_metrics[~(sepsis_metrics['supervised'] == 'deep_net')]
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[69]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size Effects (equal label counts)')
print(plot)
# In[70]:
plot = ggplot(sepsis_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('Sepsis Dataset Size by Model (equal label counts)')
print(plot)
# In[71]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[72]:
in_files = glob.glob('../../results/small_subsets.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[73]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[74]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size Effects (equal label counts)')
print(plot)
# In[75]:
plot = ggplot(tb_metrics, aes(x='factor(train_count)', y='balanced_accuracy', fill='supervised'))
plot += geom_boxplot()
plot += ggtitle('TB Dataset Size by Model (equal label counts)')
print(plot)
# In[76]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB Crossover Point')
plot
# ## Large training sets without be correction
# In[6]:
in_files = glob.glob('../../results/keep_ratios.sepsis*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[9]:
sepsis_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('sepsis.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
sepsis_metrics = pd.concat([sepsis_metrics, new_df])
sepsis_metrics['train_count'] = sepsis_metrics['train sample count']
sepsis_metrics['supervised'] = sepsis_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
sepsis_metrics
# In[10]:
plot = ggplot(sepsis_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Sepsis Crossover Point')
plot
# ## TB Not Batch Effect Corrected
# In[80]:
in_files = glob.glob('../../results/keep_ratios.tb*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[81]:
tb_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tb.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
tb_metrics = pd.concat([tb_metrics, new_df])
tb_metrics['train_count'] = tb_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
tb_metrics = tb_metrics[~(tb_metrics['supervised'] == 'deep_net')]
tb_metrics['supervised'] = tb_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tb_metrics
# In[82]:
plot = ggplot(tb_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('TB Crossover Point')
plot
# ## Lupus Analyses
# In[83]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' in file]
print(in_files[:5])
# In[84]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[85]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Lupus Crossover Point')
plot
# ## Lupus Not Batch Effect Corrected
# In[86]:
in_files = glob.glob('../../results/keep_ratios.lupus*.tsv')
in_files = [file for file in in_files if 'be_corrected' not in file]
print(in_files[:5])
# In[87]:
lupus_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('lupus.')[-1]
model_info = model_info.split('.')
supervised_model = model_info[0]
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-2]
lupus_metrics = pd.concat([lupus_metrics, new_df])
lupus_metrics['train_count'] = lupus_metrics['train sample count']
# Looking at the training curves, deep_net isn't actually training
# I need to fix it going forward, but for now I can clean up the visualizations by removing it
lupus_metrics = lupus_metrics[~(lupus_metrics['supervised'] == 'deep_net')]
lupus_metrics['supervised'] = lupus_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
lupus_metrics
# In[88]:
plot = ggplot(lupus_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Lupus Crossover Point')
plot
# ## Tissue Prediction
# In[2]:
in_files = glob.glob('../../results/Blood.Breast.*.tsv')
in_files = [f for f in in_files if 'be_corrected' not in f]
print(in_files[:5])
# In[3]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('Breast.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[4]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Blood vs Breast Tissue Prediction')
plot
# ### BE Corrected binary tissue classification
# In[5]:
in_files = glob.glob('../../results/Blood.Breast.*.tsv')
in_files = [f for f in in_files if 'be_corrected' in f]
print(in_files[:5])
# In[6]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('Breast.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[7]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += geom_hline(yintercept=.5, linetype='dashed')
plot += ggtitle('Blood vs Breast Tissue Prediction')
plot
# ### All Tissue Predictions
# In[8]:
in_files = glob.glob('../../results/all-tissue.*.tsv')
in_files = [f for f in in_files if 'be_corrected' not in f]
print(in_files[:5])
# In[9]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('all-tissue.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:-1])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('deep_net', 'five_layer_net')
tissue_metrics
# In[10]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('All Tissue Prediction')
plot
# ## Imputation pretraining
# In[11]:
in_files = glob.glob('../../results/tissue_impute.*.tsv')
print(in_files[:5])
# In[12]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('tissue_impute.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics = tissue_metrics.rename({'impute_samples': 'pretraining_sample_count'}, axis='columns')
tissue_metrics
# In[13]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='factor(supervised)'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Effects of Imputation on Multiclass Tissue Prediction')
plot += facet_grid('pretraining_sample_count ~ .')
plot
# In[14]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='factor(pretraining_sample_count)'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Effects of Imputation on Multiclass Tissue Prediction')
plot
# ## Adding BioBERT Embeddings
# In[15]:
in_files = glob.glob('../../results/all-tissue-biobert*.tsv')
print(in_files[:5])
# In[16]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics = tissue_metrics.rename({'impute_samples': 'pretraining_sample_count'}, axis='columns')
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[17]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='supervised'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Multiclass Tissue Prediction With Expression + BioBERT')
plot
# In[ ]:
# ## Sample Split Positive Control
# In[18]:
in_files = glob.glob('../../results/sample-split.*.tsv')
print(in_files[:5])
# In[19]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('.')
model_info = model_info[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[20]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Transfer Learning Sample Positive Control')
plot += facet_grid('supervised ~ .')
plot
# In[21]:
single_run_df = tissue_metrics[tissue_metrics['seed'] == '1']
single_run_df.head()
# In[22]:
plot = ggplot(single_run_df, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
#plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Sample Control Single Run Points')
plot += facet_grid('supervised ~ .')
plot
# In[ ]:
# ## Study Split Positive Control
# In[23]:
in_files = glob.glob('../../results/study-split.*.tsv')
print(in_files[:5])
# In[24]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('.')
model_info = model_info[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[25]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Transfer Learning Study Positive Control')
plot += facet_grid('supervised ~ .')
plot
# In[26]:
single_run_df = tissue_metrics[tissue_metrics['seed'] == '1']
single_run_df.head()
# In[27]:
plot = ggplot(single_run_df, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Study Level Single Run Points')
plot += facet_grid('supervised ~ .')
plot
# ## Tissue Split
# In[28]:
in_files = glob.glob('../../results/tissue-split.*.tsv')
print(in_files[:5])
# In[29]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('.')
model_info = model_info[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[30]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Cross-Tissue Pretraining')
plot += facet_grid('supervised ~ .')
plot
# In[31]:
single_run_df = tissue_metrics[tissue_metrics['seed'] == '1']
single_run_df.head()
# In[32]:
plot = ggplot(single_run_df, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Cross-tissue Single Run Points')
plot += facet_grid('supervised ~ .')
plot
# ## Sex Prediction
# ### Sample Level
# In[2]:
in_files = glob.glob('../../results/sample-split-sex-prediction.*.tsv')
print(in_files[:5])
# In[3]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('.')
model_info = model_info[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
tissue_metrics = pd.concat([tissue_metrics, new_df])
tissue_metrics['train_count'] = tissue_metrics['train sample count']
tissue_metrics['supervised'] = tissue_metrics['supervised'].str.replace('pytorch_supervised', 'three_layer_net')
tissue_metrics
# In[4]:
plot = ggplot(tissue_metrics, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Transfer Learning Sample Positive Control')
plot += facet_grid('supervised ~ .')
plot
# In[5]:
single_run_df = tissue_metrics[tissue_metrics['seed'] == '1']
single_run_df.head()
# In[6]:
plot = ggplot(single_run_df, aes(x='train_count', y='balanced_accuracy', color='is_pretrained'))
#plot += geom_smooth()
plot += geom_point(alpha=.2)
plot += ggtitle('Sample Control Single Run Points')
plot += facet_grid('supervised ~ .')
plot
# ### Study Level
# In[7]:
in_files = glob.glob('../../results/study-split-sex-prediction.*.tsv')
print(in_files[:5])
# In[8]:
tissue_metrics = pd.DataFrame()
for path in in_files:
new_df = pd.read_csv(path, sep='\t')
model_info = path.strip('.tsv').split('biobert.')[-1]
model_info = model_info.split('.')
model_info = model_info[-1]
model_info = model_info.split('_')
supervised_model = '_'.join(model_info[:2])
new_df['supervised'] = supervised_model
new_df['seed'] = model_info[-1]
    tissue_metrics = pd.concat([tissue_metrics, new_df])
from collections import deque
from datetime import datetime
import operator
import re
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
import pandas.core.common as com
from pandas.core.computation.expressions import _MIN_ELEMENTS, _NUMEXPR_INSTALLED
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
msgs = [
r"Invalid comparison between dtype=datetime64\[ns\] and ndarray",
"invalid type promotion",
(
# npdev 1.20.0
r"The DTypes <class 'numpy.dtype\[.*\]'> and "
r"<class 'numpy.dtype\[.*\]'> do not have a common DType."
),
]
msg = "|".join(msgs)
with pytest.raises(TypeError, match=msg):
x >= y
with pytest.raises(TypeError, match=msg):
x > y
with pytest.raises(TypeError, match=msg):
x < y
with pytest.raises(TypeError, match=msg):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
msg = (
"'(<|>)=?' not supported between "
"instances of 'numpy.ndarray' and 'Timestamp'"
)
with pytest.raises(TypeError, match=msg):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError, match=msg):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
def test_df_flex_cmp_ea_dtype_with_ndarray_series(self):
ii = pd.IntervalIndex.from_breaks([1, 2, 3])
df = pd.DataFrame({"A": ii, "B": ii})
ser = pd.Series([0, 0])
res = df.eq(ser, axis=0)
expected = pd.DataFrame({"A": [False, False], "B": [False, False]})
tm.assert_frame_equal(res, expected)
ser2 = pd.Series([1, 2], index=["A", "B"])
res2 = df.eq(ser2, axis=1)
tm.assert_frame_equal(res2, expected)
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.skipif(not _NUMEXPR_INSTALLED, reason="numexpr not installed")
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(_MIN_ELEMENTS + 100).reshape(_MIN_ELEMENTS // 100 + 1, -1) * 100
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = pd.Series(tdi)
df = pd.DataFrame({0: dti, 1: tdi})
result = df.add(tser, axis=0)
expected = pd.DataFrame({0: dti + tdi, 1: tdi + tdi})
tm.assert_frame_equal(result, expected)
def test_df_add_flex_filled_mixed_dtypes(self):
# GH 19611
dti = pd.date_range("2016-01-01", periods=3)
ser = pd.Series(["1 Day", "NaT", "2 Days"], dtype="timedelta64[ns]")
df = pd.DataFrame({"A": dti, "B": ser})
other = pd.DataFrame({"A": ser, "B": ser})
fill = pd.Timedelta(days=1).to_timedelta64()
result = df.add(other, fill_value=fill)
expected = pd.DataFrame(
{
"A": pd.Series(
["2016-01-02", "2016-01-03", "2016-01-05"], dtype="datetime64[ns]"
),
"B": ser * 2,
}
)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame(
self, all_arithmetic_operators, float_frame, mixed_float_frame
):
# one instance of parametrized fixture
op = all_arithmetic_operators
def f(x, y):
# r-versions not in operator-stdlib; get op without "r" and invert
if op.startswith("__r"):
return getattr(operator, op.replace("__r", "__"))(y, x)
return getattr(operator, op)(x, y)
result = getattr(float_frame, op)(2 * float_frame)
expected = f(float_frame, 2 * float_frame)
tm.assert_frame_equal(result, expected)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
@pytest.mark.parametrize("op", ["__add__", "__sub__", "__mul__"])
def test_arith_flex_frame_mixed(
self, op, int_frame, mixed_int_frame, mixed_float_frame
):
f = getattr(operator, op)
# vs mix int
result = getattr(mixed_int_frame, op)(2 + mixed_int_frame)
expected = f(mixed_int_frame, 2 + mixed_int_frame)
# no overflow in the uint
dtype = None
if op in ["__sub__"]:
dtype = dict(B="uint64", C=None)
elif op in ["__add__", "__mul__"]:
dtype = dict(C=None)
tm.assert_frame_equal(result, expected)
_check_mixed_int(result, dtype=dtype)
# vs mix float
result = getattr(mixed_float_frame, op)(2 * mixed_float_frame)
expected = f(mixed_float_frame, 2 * mixed_float_frame)
tm.assert_frame_equal(result, expected)
_check_mixed_float(result, dtype=dict(C=None))
# vs plain int
result = getattr(int_frame, op)(2 * int_frame)
expected = f(int_frame, 2 * int_frame)
tm.assert_frame_equal(result, expected)
def test_arith_flex_frame_raise(self, all_arithmetic_operators, float_frame):
# one instance of parametrized fixture
op = all_arithmetic_operators
# Check that arrays with dim >= 3 raise
for dim in range(3, 6):
arr = np.ones((1,) * dim)
msg = "Unable to coerce to Series/DataFrame"
with pytest.raises(ValueError, match=msg):
getattr(float_frame, op)(arr)
def test_arith_flex_frame_corner(self, float_frame):
const_add = float_frame.add(1)
tm.assert_frame_equal(const_add, float_frame + 1)
# corner cases
result = float_frame.add(float_frame[:0])
tm.assert_frame_equal(result, float_frame * np.nan)
result = float_frame[:0].add(float_frame)
tm.assert_frame_equal(result, float_frame * np.nan)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], fill_value=3)
with pytest.raises(NotImplementedError, match="fill_value"):
float_frame.add(float_frame.iloc[0], axis="index", fill_value=3)
def test_arith_flex_series(self, simple_frame):
df = simple_frame
row = df.xs("a")
col = df["two"]
# after arithmetic refactor, add truediv here
ops = ["add", "sub", "mul", "mod"]
for op in ops:
f = getattr(df, op)
op = getattr(operator, op)
tm.assert_frame_equal(f(row), op(df, row))
tm.assert_frame_equal(f(col, axis=0), op(df.T, col).T)
# special case for some reason
tm.assert_frame_equal(df.add(row, axis=None), df + row)
# cases which will be refactored after big arithmetic refactor
tm.assert_frame_equal(df.div(row), df / row)
tm.assert_frame_equal(df.div(col, axis=0), (df.T / col).T)
# broadcasting issue in GH 7325
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="int64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
df = pd.DataFrame(np.arange(3 * 2).reshape((3, 2)), dtype="float64")
expected = pd.DataFrame([[np.nan, np.inf], [1.0, 1.5], [1.0, 1.25]])
result = df.div(df[0], axis="index")
tm.assert_frame_equal(result, expected)
def test_arith_flex_zero_len_raises(self):
# GH 19522 passing fill_value to frame flex arith methods should
# raise even in the zero-length special cases
ser_len0 = pd.Series([], dtype=object)
df_len0 = pd.DataFrame(columns=["A", "B"])
df = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
with pytest.raises(NotImplementedError, match="fill_value"):
df.add(ser_len0, fill_value="E")
with pytest.raises(NotImplementedError, match="fill_value"):
df_len0.sub(df["A"], axis=None, fill_value=3)
def test_flex_add_scalar_fill_value(self):
# GH#12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype="float")
df = pd.DataFrame({"foo": dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
tm.assert_frame_equal(res, exp)
class TestFrameArithmetic:
def test_td64_op_nat_casting(self):
# Make sure we don't accidentally treat timedelta64(NaT) as datetime64
# when calling dispatch_to_series in DataFrame arithmetic
ser = pd.Series(["NaT", "NaT"], dtype="timedelta64[ns]")
df = pd.DataFrame([[1, 2], [3, 4]])
result = df * ser
expected = pd.DataFrame({0: ser, 1: ser})
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_rowlike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
expected = pd.DataFrame(
[[2, 4], [4, 6], [6, 8]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + rowlike
tm.assert_frame_equal(result, expected)
result = rowlike + df
tm.assert_frame_equal(result, expected)
def test_df_add_2d_array_collike_broadcasts(self):
# GH#23000
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
expected = pd.DataFrame(
[[1, 2], [5, 6], [9, 10]],
columns=df.columns,
index=df.index,
# specify dtype explicitly to avoid failing
# on 32bit builds
dtype=arr.dtype,
)
result = df + collike
tm.assert_frame_equal(result, expected)
result = collike + df
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_rowlike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
rowlike = arr[[1], :] # shape --> (1, ncols)
assert rowlike.shape == (1, df.shape[1])
exvals = [
getattr(df.loc["A"], opname)(rowlike.squeeze()),
getattr(df.loc["B"], opname)(rowlike.squeeze()),
getattr(df.loc["C"], opname)(rowlike.squeeze()),
]
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index)
result = getattr(df, opname)(rowlike)
tm.assert_frame_equal(result, expected)
def test_df_arith_2d_array_collike_broadcasts(self, all_arithmetic_operators):
# GH#23000
opname = all_arithmetic_operators
arr = np.arange(6).reshape(3, 2)
df = pd.DataFrame(arr, columns=[True, False], index=["A", "B", "C"])
collike = arr[:, [1]] # shape --> (nrows, 1)
assert collike.shape == (df.shape[0], 1)
exvals = {
True: getattr(df[True], opname)(collike.squeeze()),
False: getattr(df[False], opname)(collike.squeeze()),
}
dtype = None
if opname in ["__rmod__", "__rfloordiv__"]:
# Series ops may return mixed int/float dtypes in cases where
# DataFrame op will return all-float. So we upcast `expected`
dtype = np.common_type(*[x.values for x in exvals.values()])
expected = pd.DataFrame(exvals, columns=df.columns, index=df.index, dtype=dtype)
result = getattr(df, opname)(collike)
tm.assert_frame_equal(result, expected)
def test_df_bool_mul_int(self):
# GH 22047, GH 22163 multiplication by 1 should result in int dtype,
# not object dtype
df = pd.DataFrame([[False, True], [False, False]])
result = df * 1
# On appveyor this comes back as np.int32 instead of np.int64,
# so we check dtype.kind instead of just dtype
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
result = 1 * df
kinds = result.dtypes.apply(lambda x: x.kind)
assert (kinds == "i").all()
def test_arith_mixed(self):
left = pd.DataFrame({"A": ["a", "b", "c"], "B": [1, 2, 3]})
result = left + left
expected = pd.DataFrame({"A": ["aa", "bb", "cc"], "B": [2, 4, 6]})
tm.assert_frame_equal(result, expected)
def test_arith_getitem_commute(self):
df = pd.DataFrame({"A": [1.1, 3.3], "B": [2.5, -3.9]})
def _test_op(df, op):
result = op(df, 1)
if not df.columns.is_unique:
raise ValueError("Only unique columns supported by this test")
for col in result.columns:
tm.assert_series_equal(result[col], op(df[col], 1))
_test_op(df, operator.add)
_test_op(df, operator.sub)
_test_op(df, operator.mul)
_test_op(df, operator.truediv)
_test_op(df, operator.floordiv)
_test_op(df, operator.pow)
_test_op(df, lambda x, y: y + x)
_test_op(df, lambda x, y: y - x)
_test_op(df, lambda x, y: y * x)
_test_op(df, lambda x, y: y / x)
_test_op(df, lambda x, y: y ** x)
_test_op(df, lambda x, y: x + y)
_test_op(df, lambda x, y: x - y)
_test_op(df, lambda x, y: x * y)
_test_op(df, lambda x, y: x / y)
_test_op(df, lambda x, y: x ** y)
@pytest.mark.parametrize(
"values", [[1, 2], (1, 2), np.array([1, 2]), range(1, 3), deque([1, 2])]
)
def test_arith_alignment_non_pandas_object(self, values):
# GH#17901
df = pd.DataFrame({"A": [1, 1], "B": [1, 1]})
expected = pd.DataFrame({"A": [2, 2], "B": [3, 3]})
result = df + values
tm.assert_frame_equal(result, expected)
def test_arith_non_pandas_object(self):
df = pd.DataFrame(
np.arange(1, 10, dtype="f8").reshape(3, 3),
columns=["one", "two", "three"],
index=["a", "b", "c"],
)
val1 = df.xs("a").values
added = pd.DataFrame(df.values + val1, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val1, added)
added = pd.DataFrame((df.values.T + val1).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val1, axis=0), added)
val2 = list(df["two"])
added = pd.DataFrame(df.values + val2, index=df.index, columns=df.columns)
tm.assert_frame_equal(df + val2, added)
added = pd.DataFrame((df.values.T + val2).T, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val2, axis="index"), added)
val3 = np.random.rand(*df.shape)
added = pd.DataFrame(df.values + val3, index=df.index, columns=df.columns)
tm.assert_frame_equal(df.add(val3), added)
def test_operations_with_interval_categories_index(self, all_arithmetic_operators):
# GH#27415
op = all_arithmetic_operators
ind = pd.CategoricalIndex(pd.interval_range(start=0.0, end=2.0))
data = [1, 2]
df = pd.DataFrame([data], columns=ind)
num = 10
result = getattr(df, op)(num)
expected = pd.DataFrame([[getattr(n, op)(num) for n in data]], columns=ind)
tm.assert_frame_equal(result, expected)
def test_frame_with_frame_reindex(self):
# GH#31623
df = pd.DataFrame(
{
"foo": [pd.Timestamp("2019"), pd.Timestamp("2020")],
"bar": [pd.Timestamp("2018"), pd.Timestamp("2021")],
},
columns=["foo", "bar"],
)
df2 = df[["foo"]]
result = df - df2
expected = pd.DataFrame(
{"foo": [pd.Timedelta(0), pd.Timedelta(0)], "bar": [np.nan, np.nan]},
columns=["bar", "foo"],
)
tm.assert_frame_equal(result, expected)
def test_frame_with_zero_len_series_corner_cases():
# GH#28600
# easy all-float case
df = pd.DataFrame(np.random.randn(6).reshape(3, 2), columns=["A", "B"])
ser = pd.Series(dtype=np.float64)
result = df + ser
expected = pd.DataFrame(df.values * np.nan, columns=df.columns)
tm.assert_frame_equal(result, expected)
result = df == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
# non-float case should not raise on comparison
df2 = pd.DataFrame(df.values.view("M8[ns]"), columns=df.columns)
result = df2 == ser
expected = pd.DataFrame(False, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_zero_len_frame_with_series_corner_cases():
# GH#28600
df = pd.DataFrame(columns=["A", "B"], dtype=np.float64)
ser = pd.Series([1, 2], index=["A", "B"])
result = df + ser
expected = df
tm.assert_frame_equal(result, expected)
def test_frame_single_columns_object_sum_axis_1():
# GH 13758
data = {
"One": pd.Series(["A", 1.2, np.nan]),
}
df = pd.DataFrame(data)
result = df.sum(axis=1)
expected = pd.Series(["A", 1.2, 0])
tm.assert_series_equal(result, expected)
# -------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestFrameArithmeticUnsorted:
def test_frame_add_tz_mismatch_converts_to_utc(self):
rng = pd.date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
df = pd.DataFrame(np.random.randn(len(rng)), index=rng, columns=["a"])
df_moscow = df.tz_convert("Europe/Moscow")
result = df + df_moscow
assert result.index.tz is pytz.utc
result = df_moscow + df
assert result.index.tz is pytz.utc
def test_align_frame(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = pd.DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.values[1::2] = np.nan
tm.assert_frame_equal(result, expected)
half = ts[::2]
result = ts + half.take(np.random.permutation(len(half)))
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
#!/usr/bin/env python
# coding: utf-8
#
# EPY: stripped_notebook: {"metadata": {"kernelspec": {"display_name": "starfish", "language": "python", "name": "starfish"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5"}}, "nbformat": 4, "nbformat_minor": 2}
# EPY: START markdown
### Reproduce DARTFISH results with Starfish
#
#DARTFISH is a multiplexed image based transcriptomics assay from the [Zhang lab](http://genome-tech.ucsd.edu/ZhangLab/). As of this writing, this assay is not published yet. Nevertheless, here we demonstrate that Starfish can be used to process the data from raw images into spatially resolved gene expression profiles
# EPY: END markdown
# EPY: START code
# EPY: ESCAPE %load_ext autoreload
# EPY: ESCAPE %autoreload 2
# EPY: ESCAPE %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from starfish import display
from starfish import data, FieldOfView
from starfish.types import Axes, Features, Levels
from starfish import IntensityTable
from starfish.image import Filter
from starfish.spots import DetectPixels
# EPY: END code
# EPY: START markdown
### Load data into Starfish from the Cloud
#
#The example data here correspond to DARTFISHv1 2017. The group is actively working on improving the protocol. The data represent human brain tissue from the human occipital cortex, from 1 field of view (FOV) of a larger experiment. The data from one field of view correspond to 18 images: 6 imaging rounds (r), 3 color channels (c), and 1 z-plane (z). Each image is 988x988 (y,x).
# EPY: END markdown
# EPY: START code
use_test_data = os.getenv("USE_TEST_DATA") is not None
experiment = data.DARTFISH(use_test_data=use_test_data)
imgs = experiment.fov().get_image(FieldOfView.PRIMARY_IMAGES)
print(imgs)
# EPY: END code
# EPY: START markdown
### Visualize codebook
#
#The DARTFISH codebook maps pixel intensities across the rounds and channels to the corresponding barcodes and genes that those pixels code for. For this example dataset, the codebook specifies 96 possible barcodes. The codebook used in this experiment has 3 color channels and one blank channel, each of which contributes to the codes. The presence of the blank channel will be important later, when the filtering is described.
# EPY: END markdown
# EPY: START code
experiment.codebook
# EPY: END code
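# EPY: START code
# Purely illustrative sketch (not the starfish Codebook API): one DARTFISH
# codeword can be thought of as a (rounds x channels) binary matrix in which
# each round either lights up one of the 3 channels or is blank (all zeros).
# The specific on/off pattern below is hypothetical.
example_codeword = np.zeros((6, 3))                  # (imaging rounds, color channels)
hypothetical_on_channels = [0, 2, None, 1, None, 0]  # None marks a blank round
for r, c in enumerate(hypothetical_on_channels):
    if c is not None:
        example_codeword[r, c] = 1
example_codeword
# EPY: END code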
# EPY: START markdown
### Visualize raw data
#
#A nice way to page through all this data is to use the display command. We have commented this out for now, because it will not render in Github. Instead, we simply show an image from the first round and color channel.
# EPY: END markdown
# EPY: START code
# %gui qt5
# display(stack)
single_plane = imgs.sel({Axes.ROUND: 0, Axes.CH: 0, Axes.ZPLANE: 0})
single_plane = single_plane.xarray.squeeze()
plt.figure(figsize=(7,7))
plt.imshow(single_plane, cmap='gray', clim=list(np.percentile(single_plane, [1, 99.9])))
plt.title('Round: 0, Channel: 0')
plt.axis('off');
# EPY: END code
# EPY: START markdown
### Filter and scale raw data before decoding into spatially resolved gene expression
#
#First, we equalize the intensity of the images by scaling each image by its maximum intensity, which is equivalent to scaling by the 100th percentile value of the pixel values in each image.
# EPY: END markdown
# EPY: START code
sc_filt = Filter.Clip(p_max=100, level_method=Levels.SCALE_BY_CHUNK)
norm_imgs = sc_filt.run(imgs)
# EPY: END code
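# EPY: START code
# Rough numpy sketch of what the scaling above does to a single (round, channel,
# z) plane, reusing `single_plane` from the visualization cell: dividing by the
# plane's maximum (its 100th percentile) rescales the non-negative intensities
# into [0, 1]. This is an illustration only; the actual work is done by
# Filter.Clip above.
plane = np.asarray(single_plane)
scaled = plane / plane.max()
print(scaled.min(), scaled.max())
# EPY: END code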
# EPY: START markdown
#Next, for each imaging round and each pixel location, we zero the intensity values across all three color channels if the magnitude of this 3-vector is below a threshold. As such, the code value associated with these pixels will be the blank. This is necessary to support euclidean decoding for codebooks that include blank values.
# EPY: END markdown
# EPY: START code
z_filt = Filter.ZeroByChannelMagnitude(thresh=.05, normalize=False)
filtered_imgs = z_filt.run(norm_imgs)
# EPY: END code
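# EPY: START code
# Rough numpy sketch of the zeroing rule described above (not the starfish
# implementation): for a hypothetical (channels, y, x) chunk from one imaging
# round, compute the per-pixel L2 norm across the 3 channels and zero every
# channel of the pixels whose norm falls below the threshold.
hypothetical_chunk = np.random.rand(3, 4, 4) * 0.1        # 3 channels, 4x4 pixels
pixel_magnitude = np.linalg.norm(hypothetical_chunk, axis=0)
zeroed_chunk = np.where(pixel_magnitude[None, :, :] < 0.05, 0.0, hypothetical_chunk)
print((zeroed_chunk == 0).all(axis=0).sum(), 'pixels zeroed across all channels')
# EPY: END code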
# EPY: START markdown
### Decode the processed data into spatially resolved gene expression profiles
#
#Here, starfish decodes each pixel vector, across all rounds and channels, into the target (gene) it corresponds to. Contiguous pixels that map to the same target gene are called as one RNA molecule. Intuitively, each pixel vector is matched to the codebook by computing the euclidean distance between the pixel vector and every codeword. The minimal-distance gene target is then selected, provided that distance is within `distance_threshold`.
#
#This decoding operation requires some parameter tuning, which is described below. First, we look at a distribution of pixel vector barcode magnitudes to determine the minimum magnitude threshold at which we will attempt to decode the pixel vector.
# EPY: END markdown
# EPY: START code
def compute_magnitudes(stack, norm_order=2):
pixel_intensities = IntensityTable.from_image_stack(stack)
feature_traces = pixel_intensities.stack(traces=(Axes.CH.value, Axes.ROUND.value))
norm = np.linalg.norm(feature_traces.values, ord=norm_order, axis=1)
return norm
mags = compute_magnitudes(filtered_imgs)
plt.hist(mags, bins=20);
sns.despine(offset=3)
plt.xlabel('Barcode magnitude')
plt.ylabel('Number of pixels')
plt.yscale('log');
# EPY: END code
# EPY: START markdown
#Next, we decode the data
# EPY: END markdown
# EPY: START code
# how much magnitude should a barcode have for it to be considered by decoding? this was set by looking at
# the plot above
magnitude_threshold = 0.5
# how big do we expect our spots to be, min/max size. this was set to be equivalent to the parameters
# determined by the Zhang lab.
area_threshold = (5, 30)
# how close, in euclidean space, should the pixel barcode be to the nearest barcode it was called to?
# here, I set this to be a large number, so I can inspect the distribution of decoded distances below
distance_threshold = 3
psd = DetectPixels.PixelSpotDecoder(
codebook=experiment.codebook,
metric='euclidean',
distance_threshold=distance_threshold,
magnitude_threshold=magnitude_threshold,
min_area=area_threshold[0],
max_area=area_threshold[1]
)
initial_spot_intensities, results = psd.run(filtered_imgs)
# EPY: END code
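# EPY: START code
# Conceptual sketch of the decoding rule described above (the real work is done
# by PixelSpotDecoder): flatten a pixel trace and every codeword into
# (rounds * channels)-length vectors, pick the codeword at minimal euclidean
# distance, and only keep the call if that distance is within the threshold.
# The function name and inputs are illustrative.
def nearest_codeword(pixel_trace, codewords, max_distance):
    # pixel_trace: (n_features,); codewords: (n_codes, n_features)
    dists = np.linalg.norm(codewords - pixel_trace, axis=1)
    best = int(np.argmin(dists))
    if dists[best] <= max_distance:
        return best, dists[best]
    return None, dists[best]
# EPY: END code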
# EPY: START code
spots_df = initial_spot_intensities.to_features_dataframe()
spots_df['area'] = np.pi*spots_df['radius']**2
spots_df = spots_df.loc[spots_df[Features.PASSES_THRESHOLDS]]
spots_df.head()
# EPY: END code
# EPY: START markdown
### Compare to benchmark results
#
#The below plot aggregates gene copy number across cells in the field of view and compares the results to the same copy numbers from the authors' pipeline. This can likely be improved by tweaking parameters in the above algorithms.
# EPY: END markdown
# EPY: START code
# load results from authors' pipeline
# instead of just calling read_csv with the url, we are using python requests to load it to avoid a
# SSL certificate error on some platforms.
import io, requests
cnts_benchmark = pd.read_csv(io.BytesIO(requests.get('https://d2nhj9g34unfro.cloudfront.net/20181005/DARTFISH/fov_001/counts.csv').content))
cnts_benchmark.head()
# select spots with distance less than a threshold, and count the number of each target gene
min_dist = 0.6
cnts_starfish = spots_df[spots_df.distance<=min_dist].groupby('target').count()['area']
cnts_starfish = cnts_starfish.reset_index(level=0)
cnts_starfish.rename(columns = {'target':'gene', 'area':'cnt_starfish'}, inplace=True)
benchmark_comparison = | pd.merge(cnts_benchmark, cnts_starfish, on='gene', how='left') | pandas.merge |
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.generic import ABCIndexClass
import pandas as pd
import pandas._testing as tm
from pandas.api.types import is_float, is_float_dtype, is_integer, is_scalar
from pandas.core.arrays import IntegerArray, integer_array
from pandas.core.arrays.integer import (
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
)
from pandas.tests.extension.base import BaseOpsUtil
def make_data():
return list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100]
@pytest.fixture(
params=[
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
UInt8Dtype,
UInt16Dtype,
UInt32Dtype,
UInt64Dtype,
]
)
def dtype(request):
return request.param()
@pytest.fixture
def data(dtype):
return integer_array(make_data(), dtype=dtype)
@pytest.fixture
def data_missing(dtype):
return integer_array([np.nan, 1], dtype=dtype)
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
"""Parametrized fixture giving 'data' and 'data_missing'"""
if request.param == "data":
return data
elif request.param == "data_missing":
return data_missing
def test_dtypes(dtype):
# smoke tests on auto dtype construction
if dtype.is_signed_integer:
assert np.dtype(dtype.type).kind == "i"
else:
assert np.dtype(dtype.type).kind == "u"
assert dtype.name is not None
@pytest.mark.parametrize(
"dtype, expected",
[
(Int8Dtype(), "Int8Dtype()"),
(Int16Dtype(), "Int16Dtype()"),
(Int32Dtype(), "Int32Dtype()"),
(Int64Dtype(), "Int64Dtype()"),
(UInt8Dtype(), "UInt8Dtype()"),
(UInt16Dtype(), "UInt16Dtype()"),
(UInt32Dtype(), "UInt32Dtype()"),
(UInt64Dtype(), "UInt64Dtype()"),
],
)
def test_repr_dtype(dtype, expected):
assert repr(dtype) == expected
def test_repr_array():
result = repr(integer_array([1, None, 3]))
expected = "<IntegerArray>\n[1, <NA>, 3]\nLength: 3, dtype: Int64"
assert result == expected
def test_repr_array_long():
data = integer_array([1, 2, None] * 1000)
expected = (
"<IntegerArray>\n"
"[ 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>, 1,\n"
" ...\n"
" <NA>, 1, 2, <NA>, 1, 2, <NA>, 1, 2, <NA>]\n"
"Length: 3000, dtype: Int64"
)
result = repr(data)
assert result == expected
class TestConstructors:
def test_uses_pandas_na(self):
a = pd.array([1, None], dtype=pd.Int64Dtype())
assert a[1] is pd.NA
def test_from_dtype_from_float(self, data):
# construct from our dtype & string dtype
dtype = data.dtype
# from float
expected = pd.Series(data)
result = pd.Series(
data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)
)
tm.assert_series_equal(result, expected)
# from int / list
expected = pd.Series(data)
result = pd.Series(np.array(data).tolist(), dtype=str(dtype))
tm.assert_series_equal(result, expected)
# from int / array
expected = pd.Series(data).dropna().reset_index(drop=True)
dropped = np.array(data.dropna()).astype(np.dtype((dtype.type)))
result = pd.Series(dropped, dtype=str(dtype))
tm.assert_series_equal(result, expected)
class TestArithmeticOps(BaseOpsUtil):
def _check_divmod_op(self, s, op, other, exc=None):
super()._check_divmod_op(s, op, other, None)
def _check_op(self, s, op_name, other, exc=None):
op = self.get_op_from_name(op_name)
result = op(s, other)
# compute expected
mask = s.isna()
# if s is a DataFrame, squeeze to a Series
# for comparison
if isinstance(s, pd.DataFrame):
result = result.squeeze()
s = s.squeeze()
mask = mask.squeeze()
# other array is an Integer
if isinstance(other, IntegerArray):
omask = getattr(other, "mask", None)
mask = getattr(other, "data", other)
if omask is not None:
mask |= omask
# 1 ** na is na, so need to unmask those
if op_name == "__pow__":
mask = np.where(~s.isna() & (s == 1), False, mask)
elif op_name == "__rpow__":
other_is_one = other == 1
if isinstance(other_is_one, pd.Series):
other_is_one = other_is_one.fillna(False)
mask = np.where(other_is_one, False, mask)
# float result type or float op
if (
is_float_dtype(other)
or is_float(other)
or op_name in ["__rtruediv__", "__truediv__", "__rdiv__", "__div__"]
):
rs = s.astype("float")
expected = op(rs, other)
self._check_op_float(result, expected, mask, s, op_name, other)
# integer result type
else:
rs = pd.Series(s.values._data, name=s.name)
expected = op(rs, other)
self._check_op_integer(result, expected, mask, s, op_name, other)
def _check_op_float(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in float dtypes
expected[mask] = np.nan
if "floordiv" in op_name:
# Series op sets 1//0 to np.inf, which IntegerArray does not do (yet)
mask2 = np.isinf(expected) & np.isnan(result)
expected[mask2] = np.nan
tm.assert_series_equal(result, expected)
def _check_op_integer(self, result, expected, mask, s, op_name, other):
# check comparisons that are resulting in integer dtypes
# to compare properly, we convert the expected
# to float, mask to nans and convert infs
# if we have uints then we process as uints
# then convert to float
# and we ultimately want to create a IntArray
# for comparisons
fill_value = 0
# mod/rmod turn floating 0 into NaN while
# integer works as expected (no nan)
if op_name in ["__mod__", "__rmod__"]:
if is_scalar(other):
if other == 0:
expected[s.values == 0] = 0
else:
expected = expected.fillna(0)
else:
expected[
(s.values == 0).fillna(False)
& ((expected == 0).fillna(False) | expected.isna())
] = 0
try:
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
except ValueError:
expected = expected.astype(float)
expected[
((expected == np.inf) | (expected == -np.inf)).fillna(False)
] = fill_value
original = expected
expected = expected.astype(s.dtype)
expected[mask] = pd.NA
# assert that the expected astype is ok
# (skip for unsigned as they have wrap around)
if not s.dtype.is_unsigned_integer:
original = pd.Series(original)
# we need to fill with 0's to emulate what an astype('int') does
# (truncation) for certain ops
if op_name in ["__rtruediv__", "__rdiv__"]:
mask |= original.isna()
original = original.fillna(0).astype("int")
original = original.astype("float")
original[mask] = np.nan
tm.assert_series_equal(original, expected.astype("float"))
# assert our expected result
tm.assert_series_equal(result, expected)
def test_arith_integer_array(self, data, all_arithmetic_operators):
# we operate with a rhs of an integer array
op = all_arithmetic_operators
s = pd.Series(data)
rhs = pd.Series([1] * len(data), dtype=data.dtype)
rhs.iloc[-1] = np.nan
self._check_op(s, op, rhs)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
# scalar
op = all_arithmetic_operators
s = pd.Series(data)
self._check_op(s, op, 1, exc=TypeError)
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
op = all_arithmetic_operators
df = pd.DataFrame({"A": data})
self._check_op(df, op, 1, exc=TypeError)
def test_arith_series_with_array(self, data, all_arithmetic_operators):
# ndarray & other series
op = all_arithmetic_operators
s = pd.Series(data)
other = np.ones(len(s), dtype=s.dtype.type)
self._check_op(s, op, other, exc=TypeError)
def test_arith_coerce_scalar(self, data, all_arithmetic_operators):
op = all_arithmetic_operators
s = pd.Series(data)
other = 0.01
self._check_op(s, op, other)
@pytest.mark.parametrize("other", [1.0, np.array(1.0)])
def test_arithmetic_conversion(self, all_arithmetic_operators, other):
# if we have a float operand we should have a float result
# if that is equal to an integer
op = self.get_op_from_name(all_arithmetic_operators)
s = pd.Series([1, 2, 3], dtype="Int64")
result = op(s, other)
assert result.dtype is np.dtype("float")
def test_arith_len_mismatch(self, all_arithmetic_operators):
# operating with a list-like with non-matching length raises
op = self.get_op_from_name(all_arithmetic_operators)
other = np.array([1.0])
s = pd.Series([1, 2, 3], dtype="Int64")
with pytest.raises(ValueError, match="Lengths must match"):
op(s, other)
@pytest.mark.parametrize("other", [0, 0.5])
def test_arith_zero_dim_ndarray(self, other):
arr = integer_array([1, None, 2])
result = arr + np.array(other)
expected = arr + other
tm.assert_equal(result, expected)
def test_error(self, data, all_arithmetic_operators):
# invalid ops
op = all_arithmetic_operators
s = pd.Series(data)
ops = getattr(s, op)
opa = getattr(data, op)
# invalid scalars
msg = (
r"(:?can only perform ops with numeric values)"
r"|(:?IntegerArray cannot perform the operation mod)"
)
with pytest.raises(TypeError, match=msg):
ops("foo")
with pytest.raises(TypeError, match=msg):
ops(pd.Timestamp("20180101"))
# invalid array-likes
with pytest.raises(TypeError, match=msg):
ops(pd.Series("foo", index=s.index))
if op != "__rpow__":
# TODO(extension)
# rpow with a datetimelike coerces the integer array incorrectly
msg = (
"can only perform ops with numeric values|"
"cannot perform .* with this index type: DatetimeArray|"
"Addition/subtraction of integers and integer-arrays "
"with DatetimeArray is no longer supported. *"
)
with pytest.raises(TypeError, match=msg):
ops(pd.Series(pd.date_range("20180101", periods=len(s))))
# 2d
result = opa(pd.DataFrame({"A": s}))
assert result is NotImplemented
msg = r"can only perform ops with 1-d structures"
with pytest.raises(NotImplementedError, match=msg):
opa(np.arange(len(s)).reshape(-1, len(s)))
@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)])
def test_divide_by_zero(self, zero, negative):
# https://github.com/pandas-dev/pandas/issues/27398
a = pd.array([0, 1, -1, None], dtype="Int64")
result = a / zero
expected = np.array([np.nan, np.inf, -np.inf, np.nan])
if negative:
expected *= -1
tm.assert_numpy_array_equal(result, expected)
def test_pow_scalar(self):
a = pd.array([-1, 0, 1, None, 2], dtype="Int64")
result = a ** 0
expected = | pd.array([1, 1, 1, 1, 1], dtype="Int64") | pandas.array |
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_FP_S1 Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
#df2 = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL.xlsx', 'RIL_S2')
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
t = range(0,tf,1)
c_firewood_energy_S1 = df1['Firewood_other_energy_use'].values
#c_loss_S2 = df2['C_loss'].values
c_firewood_energy_E = dfE['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_E')
c_pellets_E = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#S1
df = pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_S1')
tf = 201
t = np.arange(tf)
def decomp_S1(t,remainAGB_S1):
return (1-(1-np.exp(-a*t))**b)*remainAGB_S1
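#illustrative sanity check of the decay curve (not part of the original script):
#at t = 0 the bracketed term equals 1, so all of remainAGB is still present, and
#as t grows the bracket shrinks toward 0, i.e. the remaining biomass decays away
print(decomp_S1(np.array([0, 50, 200]), 100))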
#set zero matrix
output_decomp_S1 = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part_S1 in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp_S1[i:,i] = decomp_S1(t[:len(t)-i],remain_part_S1)
print(output_decomp_S1[:,:4])
#find the yearly emissions from decomposition by calculating the differences between consecutive elements in each column of 'output_decomp_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between elements,
subs_matrix_S1 = np.zeros((len(t)-1,len(df['C_remainAGB'].values)))
i = 0
while i < tf:
subs_matrix_S1[:,i] = np.diff(output_decomp_S1[:,i])
i = i + 1
print(subs_matrix_S1[:,:4])
print(len(subs_matrix_S1))
#the positive differences correspond to carbon entering the pool when each cohort starts decomposing (the initial jump in every column), not to emissions,
#so we replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_S1 = subs_matrix_S1.clip(max=0)
print(subs_matrix_S1[:,:4])
#make the results as absolute values
subs_matrix_S1 = abs(subs_matrix_S1)
print(subs_matrix_S1[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_S1 = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix_S1)
subs_matrix_S1 = np.vstack((zero_matrix_S1, subs_matrix_S1))
print(subs_matrix_S1[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_S1 = (tf,1)
decomp_tot_S1 = np.zeros(matrix_tot_S1)
i = 0
while i < tf:
decomp_tot_S1[:,0] = decomp_tot_S1[:,0] + subs_matrix_S1[:,i]
i = i + 1
print(decomp_tot_S1[:,0])
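#the explicit column-sum loop above is equivalent to a single vectorized sum;
#this cross-check is illustrative and not part of the original script
print(np.allclose(decomp_tot_S1[:,0], subs_matrix_S1.sum(axis=1)))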
#S1_C
df = | pd.read_excel('C:\\Work\\Programming\\Practice\\RIL_EC.xlsx', 'RIL_C_S1') | pandas.read_excel |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
import string
from collections import OrderedDict
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
import pytest
from kartothek.core.dataset import DatasetMetadata
from kartothek.core.index import ExplicitSecondaryIndex
from kartothek.core.uuid import gen_uuid
from kartothek.io_components.metapartition import MetaPartition
from kartothek.serialization import DataFrameSerializer
def test_file_structure_dataset_v4(store_factory, bound_store_dataframes):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, dataset_uuid="dataset_uuid", metadata_version=4
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
# TODO: json -> msgpack
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/cluster_1.parquet",
"dataset_uuid/helper/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/cluster_1.parquet",
"dataset_uuid/core/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on(store_factory, bound_store_dataframes):
store = store_factory()
assert set(store.keys()) == set()
df = pd.DataFrame(
{"P": [1, 2, 3, 1, 2, 3], "L": [1, 1, 1, 2, 2, 2], "TARGET": np.arange(10, 16)}
)
df_helper = pd.DataFrame(
{
"P": [1, 2, 3, 1, 2, 3],
"L": [1, 1, 1, 2, 2, 2],
"info": string.ascii_lowercase[:2],
}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P", "L"]
assert len(dataset.partitions) == 12
store = store_factory()
expected_keys = set(
[
"dataset_uuid.by-dataset-metadata.json",
"dataset_uuid/helper/P=1/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=1/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=2/L=2/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=1/cluster_2.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_1.parquet",
"dataset_uuid/helper/P=3/L=2/cluster_2.parquet",
"dataset_uuid/helper/_common_metadata",
"dataset_uuid/core/P=1/L=1/cluster_1.parquet",
"dataset_uuid/core/P=1/L=1/cluster_2.parquet",
"dataset_uuid/core/P=1/L=2/cluster_1.parquet",
"dataset_uuid/core/P=1/L=2/cluster_2.parquet",
"dataset_uuid/core/P=2/L=1/cluster_1.parquet",
"dataset_uuid/core/P=2/L=1/cluster_2.parquet",
"dataset_uuid/core/P=2/L=2/cluster_1.parquet",
"dataset_uuid/core/P=2/L=2/cluster_2.parquet",
"dataset_uuid/core/P=3/L=1/cluster_1.parquet",
"dataset_uuid/core/P=3/L=1/cluster_2.parquet",
"dataset_uuid/core/P=3/L=2/cluster_1.parquet",
"dataset_uuid/core/P=3/L=2/cluster_2.parquet",
"dataset_uuid/core/_common_metadata",
]
)
assert set(expected_keys) == set(store.keys())
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col(
store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_file_structure_dataset_v4_partition_on_second_table_no_index_col_simple_group(
store_factory, bound_store_dataframes
):
"""
    Pandas seems to stop evaluating the groupby expression if the dataframe after the first column split
is of length 1. This seems to be an optimization which should, however, still raise a KeyError
"""
df = pd.DataFrame(
{"P": np.arange(0, 2), "L": np.arange(0, 2), "TARGET": np.arange(10, 12)}
)
df_helper = pd.DataFrame({"P": [0, 1], "info": string.ascii_lowercase[:2]})
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
with pytest.raises(Exception):
bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["P", "L"],
metadata_version=4,
)
def test_store_dataframes_as_dataset(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
{
"label": "cluster_2",
"data": [("core", df.copy(deep=True)), ("helper", df_helper)],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
secondary_indices=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
assert "P" in dataset.indices
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
index_dct = stored_dataset.indices["P"].load(store).index_dct
assert sorted(index_dct.keys()) == list(range(0, 10))
assert any([sorted(p) == ["cluster_1", "cluster_2"] for p in index_dct.values()])
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["helper"], store=store
)
pdt.assert_frame_equal(df_helper, df_stored)
def test_store_dataframes_as_dataset_empty_dataframe(
store_factory, metadata_version, df_all_types, bound_store_dataframes
):
"""
Test that writing an empty column succeeds.
In particular, this may fail due to too strict schema validation.
"""
df_empty = df_all_types.drop(0)
# Store a second table with shared columns. All shared columns must be of the same type
# This may fail in the presence of empty partitions if the schema validation doesn't account for it
df_shared_cols = df_all_types.loc[:, df_all_types.columns[:3]]
df_shared_cols["different_col"] = "a"
assert df_empty.empty
df_list = [
{
"label": "cluster_1",
"data": [("tableA", df_empty), ("tableB", df_shared_cols.copy(deep=True))],
},
{
"label": "cluster_2",
"data": [
("tableA", df_all_types),
("tableB", df_shared_cols.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableA"], store=store
)
pdt.assert_frame_equal(df_empty, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableA"], store=store
)
# Roundtrips for type date are not type preserving
df_stored["date"] = df_stored["date"].dt.date
pdt.assert_frame_equal(df_all_types, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["tableB"], store=store
)
pdt.assert_frame_equal(df_shared_cols, df_stored)
def test_store_dataframes_as_dataset_batch_mode(
store_factory, metadata_version, bound_store_dataframes
):
values_p1 = [1, 2, 3]
values_p2 = [4, 5, 6]
df = pd.DataFrame({"P": values_p1})
df2 = pd.DataFrame({"P": values_p2})
df_list = [
[
{
"label": "cluster_1",
"data": [("core", df)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_1"] for v in values_p1}
)
},
},
{
"label": "cluster_2",
"data": [("core", df2)],
"indices": {
"P": ExplicitSecondaryIndex(
"P", {v: ["cluster_2"] for v in values_p2}
)
},
},
]
]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
store = store_factory()
stored_dataset = DatasetMetadata.load_from_store(
"dataset_uuid", store
).load_all_indices(store)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_1"].files["core"], store=store
)
pdt.assert_frame_equal(df, df_stored)
df_stored = DataFrameSerializer.restore_dataframe(
key=dataset.partitions["cluster_2"].files["core"], store=store
)
pdt.assert_frame_equal(df2, df_stored)
assert stored_dataset.indices["P"].to_dict() == {
1: np.array(["cluster_1"], dtype=object),
2: np.array(["cluster_1"], dtype=object),
3: np.array(["cluster_1"], dtype=object),
4: np.array(["cluster_2"], dtype=object),
5: np.array(["cluster_2"], dtype=object),
6: np.array(["cluster_2"], dtype=object),
}
def test_store_dataframes_as_dataset_auto_uuid(
store_factory, metadata_version, mock_uuid, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df_helper = pd.DataFrame(
{"P": np.arange(0, 10), "info": string.ascii_lowercase[:10]}
)
df_list = [
{
"label": "cluster_1",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
{
"label": "cluster_2",
"data": [
("core", df.copy(deep=True)),
("helper", df_helper.copy(deep=True)),
],
},
]
dataset = bound_store_dataframes(
df_list, store=store_factory, metadata_version=metadata_version
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store(
"auto_dataset_uuid", store_factory()
)
assert dataset.uuid == stored_dataset.uuid
assert dataset.metadata == stored_dataset.metadata
assert dataset.partitions == stored_dataset.partitions
def test_store_dataframes_as_dataset_list_input(
store_factory, metadata_version, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame(
{
"P": np.arange(100, 110),
"L": np.arange(100, 110),
"TARGET": np.arange(10, 20),
}
)
df_list = [df, df2]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
)
assert isinstance(dataset, DatasetMetadata)
assert len(dataset.partitions) == 2
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store_factory())
assert dataset == stored_dataset
def test_store_dataframes_as_dataset_mp_partition_on_none(
metadata_version, store, store_factory, bound_store_dataframes
):
df = pd.DataFrame(
{"P": np.arange(0, 10), "L": np.arange(0, 10), "TARGET": np.arange(10, 20)}
)
df2 = pd.DataFrame({"P": np.arange(0, 10), "info": np.arange(100, 110)})
mp = MetaPartition(
label=gen_uuid(),
data={"core": df, "helper": df2},
metadata_version=metadata_version,
)
df_list = [None, mp]
dataset = bound_store_dataframes(
df_list,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=metadata_version,
partition_on=["P"],
)
assert isinstance(dataset, DatasetMetadata)
assert dataset.partition_keys == ["P"]
assert len(dataset.partitions) == 10
assert dataset.metadata_version == metadata_version
stored_dataset = DatasetMetadata.load_from_store("dataset_uuid", store)
assert dataset == stored_dataset
def test_store_dataframes_partition_on(store_factory, bound_store_dataframes):
df = pd.DataFrame(
OrderedDict([("location", ["0", "1", "2"]), ("other", ["a", "a", "a"])])
)
# First partition is empty, test this edgecase
input_ = [
{
"label": "label",
"data": [("order_proposals", df.head(0))],
"indices": {"location": {}},
},
{
"label": "label",
"data": [("order_proposals", df)],
"indices": {"location": {k: ["label"] for k in df["location"].unique()}},
},
]
dataset = bound_store_dataframes(
input_,
store=store_factory,
dataset_uuid="dataset_uuid",
metadata_version=4,
partition_on=["other"],
)
assert len(dataset.partitions) == 1
assert len(dataset.indices) == 1
assert dataset.partition_keys == ["other"]
def _exception_str(exception):
"""
Extract the exception message, even if this is a re-throw of an exception
in distributed.
"""
if isinstance(exception, ValueError) and exception.args[0] == "Long error message":
return exception.args[1]
return str(exception)
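# Illustrative note (added for clarity, not part of the original tests):
#   _exception_str(ValueError("Long error message", "actual message")) -> "actual message"
#   _exception_str(ValueError("actual message"))                       -> "actual message"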
@pytest.mark.parametrize(
"dfs,ok",
[
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int32),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.int16),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int16),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int32),
"X": pd.Series([2], dtype=np.int64),
}
),
],
True,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": pd.Series([1], dtype=np.int64),
}
),
pd.DataFrame(
{
"P": pd.Series([2], dtype=np.int64),
"X": pd.Series([2], dtype=np.uint64),
}
),
],
False,
),
(
[
pd.DataFrame(
{
"P": pd.Series([1], dtype=np.int64),
"X": | pd.Series([1], dtype=np.int64) | pandas.Series |
import streamlit as st
import pandas as pd
import yfinance as yf
import datetime
import os
from pathlib import Path
import requests
import hvplot.pandas
import numpy as np
import matplotlib.pyplot as plt
from MCForecastTools_2Mod import MCSimulation
import plotly.express as px
from statsmodels.tsa.arima_model import ARIMA
import pmdarima as pm
from sklearn.linear_model import LinearRegression
# note: lines 95-96 in the MCForecast file are commented out to avoid printing the "Running simulation number" lines
# title of the project and introduction on what to do
st.title('Dividends Reinvestment Dashboard')
st.write('Analysis of the **Power** of **Dividend Reinvestment**.')
st.write('Select from the list of stocks that pays dividends.')
st.write('You will then be able to select between three options.')
st.write('***Choose wisely***.')
# chosen stock and crypto tickers. choice of the 3 different options
tickers = ("AAPL","F","JPM","LUMN","MO","MSFT","T","XOM")
crypto = ("BTC-USD", "ETH-USD", "BNB-USD")
options = ("Keep the cash", "Same Stock", "Crypto")
# box selection for the stock to invest in
dropdown_stocks = st.selectbox('Pick your stock', tickers)
# starting date of the stock history. This is interactive and can be changed by the user
start = st.date_input('Start Date', value= pd.to_datetime('2011-01-01'))
end = st.date_input('End Date', value= pd.to_datetime('today'))
currentYear = datetime.datetime.now().year
# option to have a fixed time period for historical data
# start= pd.to_datetime('2011-01-01')
# end= pd.to_datetime('today')
# this is a cache so the page does not reload the entire data if it is not changed
@st.cache
# function to fetch the latest adjusted close price of the chosen stock
def close_price(dropdown_stocks):
data = yf.download(dropdown_stocks, period = "today", interval= "1d")['Adj Close'][0]
price = data
return round(data,2)
tickerData = yf.Ticker(dropdown_stocks) # Get ticker data
stock_name = tickerData.info['shortName']
# this will display the chosen stock, the value of the stock, and a line chart of the price history
if len(dropdown_stocks) > 0:
df = yf.download(dropdown_stocks, start, end)['Adj Close']
st.subheader(f'Historical value of {dropdown_stocks} ({stock_name})')
st.info('The current value is ${}'.format(close_price(dropdown_stocks)))
st.line_chart(df)
# df_history = tickerData.history(start = start, end = end)
# st.dataframe(df_history)
# Show the average yearly dividend yield (%) for the chosen stock
st.text(f'The average yearly dividend yield of {dropdown_stocks} is:')
tickerData = yf.Ticker(dropdown_stocks) # Get ticker data
tickerDf = tickerData.history(period='1d', start=start, end=end) #get the historical prices for this ticker
# Calculate the yearly % after getting the value from yahoo finance
string_summary = tickerData.info['dividendYield']
yearly_div = (string_summary) * 100
st.info(f'{yearly_div: ,.2f}%')
# Ask the user for the desired number of shares to purchase; defaults to 100 shares, minimum of 10 shares
share_amount= st.number_input('How many shares do you want?',value=100, min_value=10)
st.header('You selected {} shares.'.format(share_amount))
@st.cache
# Calculate the value of the investment for the selected number of shares
def amount(share_amount):
value = close_price(dropdown_stocks) * share_amount
price = value
return round(value,2)
def regression(stock_df, forecast_years):
stock = yf.Ticker(dropdown_stocks)
stock_df = stock.history(start = start, end = end)
stock_df["Time"] = stock_df.index
stock_df["Time"] = stock_df["Time"].dt.year
dividends = stock_df.loc[stock_df["Dividends"] > 0]
dividends = dividends.drop(columns = ["Open", "High", "Low", "Close", "Volume", "Stock Splits"])
dividends = dividends.groupby(["Time"]).sum()
dividends["Years"] = dividends.index
index_col = []
for i in range(len(dividends.index)):
index_col.append(i)
dividends["Count"] = index_col
x_amount = dividends["Count"].values.reshape(-1,1)
y_amount = dividends["Dividends"].values.reshape(-1,1)
amount_regression = LinearRegression().fit(x_amount,y_amount)
yfit_amount = amount_regression.predict(x_amount)
amount_regression.coef_ = np.squeeze(amount_regression.coef_)
amount_regression.intercept_ = np.squeeze(amount_regression.intercept_)
fig = px.scatter(dividends, y = "Dividends", x = "Years", trendline = "ols")
st.write(fig)
amount_forecast = []
forecasted_year = []
for i in range(len(dividends.index) + forecast_years):
value = (amount_regression.coef_ * (i) + amount_regression.intercept_ )
amount_forecast.append(round(value,3))
forecasted_year.append(i+dividends["Years"].min())
forecasted_data = pd.DataFrame(columns = ["Year", "Forecasted Rates"])
forecasted_data["Year"] = forecasted_year
forecasted_data["Forecasted Rates"] = amount_forecast
return forecasted_data
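# Note (added for clarity): the regression above fits the yearly dividend totals against a
# simple 0..N counter, then extends the fitted line `coef * i + intercept` for
# `forecast_years` extra years to build the forecasted-rates table.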
initial_investment = (amount(share_amount))
st.info('Your initial investment is ${}'.format(amount(share_amount)))
# Showing amount of yearly dividend in $
st.text(f'Your current yearly dividend for the amount of shares you selected is $:')
# Calculate the yearly $ after getting the value from yahoo finance
string_summary2 = tickerData.info['dividendRate']
yearly_div_amount = (string_summary2) * (share_amount)
st.info(f'${yearly_div_amount}')
# Predict stock prices using a series of Monte Carlo simulations. Only works with one stock at a time.
def mc_stock_price(years):
stock = yf.Ticker(dropdown_stocks)
stock_hist = stock.history(start = start, end = end)
stock_hist.drop(columns = ["Dividends","Stock Splits"], inplace = True)
stock_hist.rename(columns = {"Close":"close"}, inplace = True)
stock_hist = pd.concat({dropdown_stocks: stock_hist}, axis = 1)
#defining variables ahead of time in preparation for MC Simulation series
Upper_Yields = []
Lower_Yields = []
Means = []
Years = [currentYear]
iteration = []
for i in range(years+1):
iteration.append(i)
#beginning Simulation series and populating with outputs
#for x in range(number of years)
for x in range(years):
MC_looped = MCSimulation(portfolio_data = stock_hist,
num_simulation= 100,
num_trading_days= 252*x+1)
MC_summary_stats = MC_looped.summarize_cumulative_return()
Upper_Yields.append(MC_summary_stats["95% CI Upper"])
Lower_Yields.append(MC_summary_stats["95% CI Lower"])
Means.append(MC_summary_stats["mean"])
Years.append(currentYear+(x+1))
potential_upper_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Upper_Yields]
potential_lower_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Lower_Yields]
potential_mean_price = [element * stock_hist[dropdown_stocks]["close"][-1] for element in Means]
prices_df = pd.DataFrame(columns = ["Lower Bound Price", "Upper Bound Price", "Forecasted Average Price"])
prices_df["Lower Bound Price"] = potential_lower_price
prices_df["Forecasted Average Price"] = potential_mean_price
prices_df["Upper Bound Price"] = potential_upper_price
fig = px.line(prices_df)
fig.update_layout(
xaxis = dict(
tickmode = 'array',
tickvals = iteration,
ticktext = Years
)
)
st.write(fig)
return prices_df
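# Note (added for clarity): each pass of the loop above runs a fresh 100-path Monte Carlo
# simulation over roughly x years of trading days (252*x + 1) and scales the simulated
# cumulative-return statistics by the latest close to build the lower/mean/upper price bands.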
def cumsum_shift(s, shift = 1, init_values = [0]):
s_cumsum = pd.Series(np.zeros(len(s)))
for i in range(shift):
s_cumsum.iloc[i] = init_values[i]
for i in range(shift,len(s)):
s_cumsum.iloc[i] = s_cumsum.iloc[i-shift] + s.iloc[i]
return s_cumsum
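# Quick sanity sketch (my addition; not used by the dashboard): shows what cumsum_shift
# produces for a tiny series - the first `shift` slots are seeded from init_values and
# each later slot adds s[i] to the value `shift` positions earlier.
def _cumsum_shift_example():
    s = pd.Series([1, 2, 3, 4])
    return cumsum_shift(s, shift=1, init_values=[0])  # -> 0.0, 2.0, 5.0, 9.0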
def predict_crypto(crypto, forecast_period=0):
forecast_days = forecast_period * 365
btc_data = yf.download(crypto, start, end)
btc_data["Date"] = btc_data.index
btc_data["Date"] = pd.to_datetime(btc_data["Date"])
btc_data["year"] = btc_data["Date"].dt.year
years = btc_data["year"].max() - btc_data["year"].min()
btc_data.reset_index(inplace = True, drop = True)
btc_data.drop(columns = ["Open", "High", "Low", "Adj Close", "Volume"], inplace = True)
btc_rolling = btc_data["Close"].rolling(window = round(len(btc_data.index)/100)).mean().dropna()
btc_change = btc_rolling.pct_change().dropna()
btc_cum = (1 + btc_change).cumprod()
list = []
for i in range(years):
list.append(btc_cum[i*round(len(btc_cum)/years):(i+1)*round(len(btc_cum)/years)].mean())
slope = (list[-1]-list[0])/len(list)
list2 = []
for i in range(years):
list2.append((slope*i)+list[0])
upper = []
lower = []
for i in range(years):
lower.append((slope*i) + (list[0]-slope))
upper.append((slope*i) + (list[0]+slope))
counter = 0
positions = []
for i in range(1, years):
if (list[i] >= lower[i]) & (list[i] <= upper[i]):
positions.append(i*round(len(btc_cum)/years))
positions.append((i+1)*round(len(btc_cum)/years))
counter+=1
if (counter < years/2):
btc_rolling = btc_data["Close"][positions[-2]:].rolling(window = round(len(btc_data.index)/100)).mean().dropna()
if forecast_period == 0:
auto_model = pm.auto_arima(btc_rolling)
model_str = str(auto_model.summary())
model_str = model_str[model_str.find("Model:"):model_str.find("Model:")+100]
start_find = model_str.find("(") + len("(")
end_find = model_str.find(")")
substring = model_str[start_find:end_find]
arima_order = substring.split(",")
for i in range(len(arima_order)):
arima_order[i] = int(arima_order[i])
arima_order = tuple(arima_order)
train = btc_rolling[:int(0.8*len(btc_rolling))]
test = btc_rolling[int(0.8*len(btc_rolling)):]
# test_length =
model = ARIMA(train.values, order=arima_order)
model_fit = model.fit(disp=0)
# if ( float(0.2*len(btc_rolling)) < int(0.2*len(btc_rolling))):
fc, se, conf = model_fit.forecast(len(test.index), alpha=0.05) # 95% conf
# else:
# fc, se, conf = model_fit.forecast((int(0.2*len(btc_rolling))), alpha=0.05)
fc_series = pd.Series(fc, index=test.index)
lower_series = pd.Series(conf[:, 0], index=test.index)
upper_series = pd.Series(conf[:, 1], index=test.index)
plt.rcParams.update({'font.size': 40})
fig = plt.figure(figsize=(40,20), dpi=100)
ax = fig.add_subplot(1,1,1)
l1 = ax.plot(train, label = "Training")
l2 = ax.plot(test, label = "Testing")
l3 = ax.plot(fc_series, label = "Forecast")
ax.fill_between(lower_series.index, upper_series, lower_series,
color='k', alpha=.15)
ax.set_title('Forecast vs Actuals')
fig.legend(loc='upper left', fontsize=40), (l1,l2,l3)
plt.rc('grid', linestyle="-", color='black')
plt.grid(True)
st.write(fig)
else:
auto_model = pm.auto_arima(btc_rolling)
model_str = str(auto_model.summary())
model_str = model_str[model_str.find("Model:"):model_str.find("Model:")+100]
start_find = model_str.find("(") + len("(")
end_find = model_str.find(")")
substring = model_str[start_find:end_find]
arima_order = substring.split(",")
for i in range(len(arima_order)):
arima_order[i] = int(arima_order[i])
arima_order = tuple(arima_order)
train = btc_rolling[:int(0.8*len(btc_rolling))]
test = btc_rolling[int(0.8*len(btc_rolling)):]
model = ARIMA(train.values, order=arima_order)
model_fit = model.fit(disp=0)
fighting = np.arange(0, (test.index[-1] + forecast_days) - test.index[0])
        empty_df = pd.DataFrame(fighting)
import numpy as np
import pandas as pd
class AdjacencyMatrix:
def __init__(self, base_path):
self.base_path = base_path
def get_folder_data(self, folder):
news_df = pd.read_csv(self.base_path + folder + "/News.txt", header=None)
news_list = list(news_df[0])
        users_df = pd.read_csv(self.base_path + folder + "/User.txt", header=None)
import os
import pandas as pd
import numpy as np
import gsw
def string_converter(value):
'''To deal with Courtney CTD codes'''
return value.split(":")[-1].strip()
def int_converter(value):
'''To deal with Courtney CTD codes'''
return int(float(value.split(":")[-1].strip()))
def float_converter(value):
'''To deal with Courtney CTD codes'''
return float(value.split(":")[-1])
def merge_courtney_ctd_codes(bottle_df, qc_df):
'''Take in ctd bottle file to be given to database, qc logs, and combine together.
For use with Courtney style codes (P06W 2017).
Input:
bottle_file - dataframe
qc_file - dataframe
Output:
b_test - dataframe with updated codes
'''
flags = {
'T':'REFTMP_FLAG_W',
'T1':'CTDTMP1_FLAG_W',
'T2':'CTDTMP2_FLAG_W',
'C':'SALNTY_FLAG_W',
'C1':'CTDCOND1_FLAG_W',
'C2':'CTDCOND2_FLAG_W'
}
index = [
'STNNBR',
'CASTNO',
'SAMPNO'
]
### too lazy to fix it all in jupyter, do later
a = qc_df
b = bottle_df
### setup indicies for writing to
a["CASTNO"] = a["stacast"] % 100
a["STNNBR"] = (a["stacast"] - a["CASTNO"]) //100
a["new_params"] = None
### update new_params to flag_parameter for df.groupby() later
for param, flag_param in flags.items():
a.loc[a["parameter"]==param,"new_params"] = flag_param
a_gb = a.groupby('new_params')
### df.update() requires the index to be set before being called
b_test = b.set_index(index)
### loop through all flag_parameter names
for param, flag_param in flags.items():
try:
a_gb_project = a_gb.get_group(flag_param).copy()
a_gb_project.rename(columns={'flag':flag_param}, inplace=True)
### df.update() requires the index to be set before being called
a_test = a_gb_project[index+[flag_param]].set_index(index)
### df.update() returns inplace, so return signature = None
b_test.update(a_test)
except KeyError:
print(f'Parameter not found: {flag_param}')
### reset to work with again
b_test = b_test.reset_index()
return b_test
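# Hedged usage sketch (my addition; the file names below are placeholders):
#   qc_df = load_courtney_ctd_codes("p06w_ctd_codes.txt")
#   bottle_df = load_to_odf_db_bottle_file("bottle_data_for_db.csv")
#   updated = merge_courtney_ctd_codes(bottle_df, qc_df)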
def load_courtney_ctd_codes(qc_file):
'''Load Courtney style ctd codes, and pass out dataframe.
Input:
qc_file - filepath to a courtney style ctd code
Output:
dataframe
'''
converters = {
0:int_converter,
1:int_converter,
2:string_converter,
3:float_converter,
4:int_converter,
5:float_converter,
6:float_converter,
7:float_converter,
}
names = [
"stacast",
"SAMPNO",
"parameter",
"pressure",
"flag",
"primary_diff",
"secondary_diff",
"p_minus_s",
]
return pd.read_csv(qc_file, header=None, converters=converters, names=names)
def load_to_odf_db_bottle_file(bottle_file):
'''File to be given to odf db maintainer to be loaded
Input:
bottle_file - filepath
Output:
dataframe
'''
return pd.read_csv(bottle_file, skiprows=[1], skipfooter=1, engine='python')
def load_exchange_bottle_file(bottle_file):
'''WHP-exchange bottle file from odf db
Input:
bottle_file - filepath
Output:
dataframe
'''
    return pd.read_csv(bottle_file, skiprows=[0,1,2,3,4,5,6,7,8,9,11], skipfooter=1, engine='python')
from numpy import mean
import pandas as pd
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plot
import matplotlib.mlab as mlab
import matplotlib.pylab as lab
import matplotlib.patches as patches
import matplotlib.ticker as plticker
from matplotlib import rcParams
from matplotlib import gridspec
from matplotlib import cm
import sys
import time
rcParams['font.sans-serif'] = 'Arial'
infile = sys.argv[1] # which file to go over
window = int(sys.argv[2]) # what size window was used?
slide = int(sys.argv[3]) # what slide
bottompanel = int(sys.argv[4]) # this is how often to plot points on the bottom panel
scaffold_file = sys.argv[5] # file with the scaffold and length
color_scheme = sys.argv[6] # what color scheme to use?
outdir = sys.argv[7] # where to output the pdf
# this is to process the file and output the shading correctly
def scale_dict(info_file):
info_read = open(info_file, "r")
info_dict = {}
final_lenny = 0
lenny = 0
for line in info_read:
linetab = (line.rstrip()).split("\t")
scaffy = linetab[0]
final_lenny = final_lenny + lenny
lenny = int(linetab[1])
info_dict[scaffy] = final_lenny
info_read.close()
return(info_dict)
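# Worked example (added for clarity; the file contents are hypothetical): a scaffold file with
# the lines "chr1\t100" and "chr2\t50" yields {"chr1": 0, "chr2": 100}, i.e. the cumulative
# offset at which each scaffold starts along the concatenated x-axis.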
# processing the scaffold information
shader = scale_dict(scaffold_file)
maxx = sum(pd.read_csv(scaffold_file, sep="\t", header=None)[1])
print(maxx)
midwin = window/2
outfiley = open(outdir+"/connected_fragments.txt", "w")
b = pd.read_csv(infile)
comparisons = list(b.columns[3:])
strains = []
ncomps = len(comparisons)
for comp in comparisons:
strainies = comp.split(";")
if (strainies[0] in strains)==False:
strains.append(strainies[0])
if comp == comparisons[ncomps-1]:
strains.append(strainies[1])
nstrains = len(strains)
print("nstrains", nstrains)
grids = sum(range(nstrains+4))+25
print("grids", grids)
fig = plot.figure(figsize=(grids, grids), dpi=10000)
gs = gridspec.GridSpec(grids, grids)
begger = 1
mover = nstrains-1
ender = begger + mover
lastfirst = None
boxcount = pd.Series()
## let's do colors
## have to normalize the data set first so that they're continuous
norm = matplotlib.colors.Normalize(vmin=75, vmax=100)
m = cm.ScalarMappable(norm=norm, cmap=color_scheme)
for comparison in comparisons:
print("%s: %s"%("processing", comparison))
compare = comparison.split(";")
compcol = b[comparison]
first = compare[0]
if lastfirst == None:
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
tickies = list(pd.Series(range(len(remains)))+0.5)
        # label offset needs to be .2 to work well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
        ax.title.set_fontsize(80) # this is very awkward, but it is the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
        # these are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
else:
if first == lastfirst:
ncomp = ncomp- 1
pass
else:
standing = strains.index(first)
remains = strains[standing+1:][::-1]
ncomp = len(remains)-1
begger = ender + 4
mover = mover - 1
ender = begger + mover
lastfirst = first
ax = plot.subplot(gs[begger:ender, :])
tickies = list(pd.Series(range(len(remains)))+0.5)
            # label offset needs to be .2 to work well
plotsizies = ender - begger
tickplace = (plotsizies+0.25)/plotsizies
ax.set_title(first, y=tickplace)
####
            ax.title.set_fontsize(80) # this is very awkward, but it is the only way to do this
ax.set_ylim(0,ender-begger)
ax.set_xlim(0, max(b['total_med']))
yloc = plticker.FixedLocator(tickies)
ax.yaxis.set_ticklabels(remains)
ax.yaxis.set_tick_params(labelsize=60)
ax.yaxis.set_major_locator(yloc)
xloc = plticker.MultipleLocator(2000000)
ax.xaxis.set_major_locator(xloc)
lockyx = list(ax.xaxis.get_major_locator().tick_values(0,max(b['total_med'])))
ax.xaxis.set_tick_params(labelsize=150, colors='black')
# for better labeling:
new_lockyx = [int(i) for i in lockyx] # this is to create labels with numbers
xlabs = []
for i in new_lockyx:
j = str(i)
if len(str(j)) <= 2:
xlabs.append(i/1)
elif 3 <= len(str(j)) <= 6:
xlabs.append(i/1000)
elif 3 <= len(str(j)) <= 9:
xlabs.append(i/1000000)
else:
xlabs.append(round(i/float(1000000000), 1))
ax.xaxis.set_ticklabels(xlabs)
            # these are the variables for the shading below
old = None
shade = True
# here comes the shading
for contig in sorted(shader):
val = shader[contig]
if old != None and shade == True:
plot.axvspan(old, val, color='0.85', alpha=0.5)
shade = False
else:
if old != None:
shade = True
old = shader[contig]
# the last one
if shade == True:
plot.axvspan(old, maxx, color='0.85', alpha=0.5)
second = compare[1]
############################## 75 ####################################
asel = compcol[(compcol >= 75) & (compcol < 83)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 75-83% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
for single in singletons:
medwinpos = b.loc[single,'total_med']
begpos = medwinpos-midwin
endpos = medwinpos+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(75)))
patchy = patches.Rectangle((begpos, ncomp),endpos-begpos, 1)
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 83 ####################################
asel = compcol[(compcol >= 83) & (compcol < 90)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 83-90% similar")
backsies = pd.Series(samies[1:]+[-20])
fronties = pd.Series([-20]+samies[:-1])
normies = pd.Series(samies)
topgo = backsies-samies
bottomgo = samies-fronties
tops = pd.Series([-20]+list(backsies[topgo == 1]))
bottoms = pd.Series(list(fronties[bottomgo == 1]))
cancels = tops-bottoms
endies = list(tops[cancels != 0])[1:] # no need to +1 because end included with .loc
beggies = list(bottoms[cancels != 0])
for terrier in range(len(endies)):
slicey = b.loc[beggies[terrier]:endies[terrier],'total_med']
aver = int(mean(b.loc[beggies[terrier]:endies[terrier], comparison]))
colori = m.to_rgba(aver)
singletons = singletons - set(range(beggies[terrier],endies[terrier]+1))
begpos = min(slicey)-midwin
endpos = max(slicey)+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
# let's deal with singletons:
for single in singletons:
medwinpos = b.loc[single,'total_med']
begpos = medwinpos-midwin
endpos = medwinpos+midwin
outfiley.write("%s\t%s\t%s\t%s\t%s\n" %(first, second, str(begpos), str(endpos), str(83)))
patchy = patches.Rectangle((begpos, ncomp),endpos-begpos, 1)
ax.add_patch(patches.Rectangle((begpos, ncomp),endpos-begpos, 1, fc = colori, ec=None))
############################## 90 #############################################
asel = compcol[(compcol >= 90) & (compcol < 95)]
samies = list(asel.index)
singletons = set(samies)
if len(samies) > 0:
print("processing 90-95% similar")
        backsies = pd.Series(samies[1:]+[-20])
"""
This is an upgraded version of Ceshine's LGBM starter script, simply adding more
average features and weekly average features to it.
"""
from datetime import date, timedelta
import gc
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import LabelEncoder
import lightgbm as lgb
df_train = pd.read_csv(
'../input/train.csv', usecols=[1, 2, 3, 4, 5],
dtype={'onpromotion': bool},
converters={'unit_sales': lambda u: np.log1p(
float(u)) if float(u) > 0 else 0},
parse_dates=["date"],
skiprows=range(1, 66458909) # 2016-01-01
)
df_test = pd.read_csv(
"../input/test.csv", usecols=[0, 1, 2, 3, 4],
dtype={'onpromotion': bool},
parse_dates=["date"] # , date_parser=parser
).set_index(
['store_nbr', 'item_nbr', 'date']
)
items = pd.read_csv(
"../input/items.csv",
).set_index("item_nbr")
stores = pd.read_csv(
"../input/stores.csv",
).set_index("store_nbr")
le = LabelEncoder()
items['family'] = le.fit_transform(items['family'].values)
stores['city'] = le.fit_transform(stores['city'].values)
stores['state'] = le.fit_transform(stores['state'].values)
stores['type'] = le.fit_transform(stores['type'].values)
df_2017 = df_train.loc[df_train.date>=pd.datetime(2017,1,1)]
del df_train
promo_2017_train = df_2017.set_index(
["store_nbr", "item_nbr", "date"])[["onpromotion"]].unstack(
level=-1).fillna(False)
promo_2017_train.columns = promo_2017_train.columns.get_level_values(1)
promo_2017_test = df_test[["onpromotion"]].unstack(level=-1).fillna(False)
promo_2017_test.columns = promo_2017_test.columns.get_level_values(1)
promo_2017_test = promo_2017_test.reindex(promo_2017_train.index).fillna(False)
promo_2017 = pd.concat([promo_2017_train, promo_2017_test], axis=1)
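# Note (added for clarity): promo_2017 is a boolean (store_nbr, item_nbr) x date matrix of
# onpromotion flags covering both the 2017 training dates and the test dates.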
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: li
@file: factor_operation_capacity.py
@time: 2019-05-30
"""
import gc
import sys
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../../')
import six, pdb
import pandas as pd
from pandas.io.json import json_normalize
from utilities.singleton import Singleton
# from basic_derivation import app
# from ultron.cluster.invoke.cache_data import cache_data
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
@six.add_metaclass(Singleton)
class FactorBasicDerivation(object):
"""
    Basic derived factors (基础衍生类因子)
"""
def __init__(self):
__str__ = 'factor_basic_derivation'
self.name = '基础衍生'
self.factor_type1 = '基础衍生'
self.factor_type2 = '基础衍生'
self.description = '基础衍生类因子'
@staticmethod
def EBIT(tp_derivation, factor_derivation, dependencies=['total_profit', 'interest_expense', 'interest_income', 'financial_expense']):
"""
:name: 息税前利润(MRQ)
:desc: [EBIT_反推法]息税前利润(MRQ) = 利润总额 + 利息支出 - 利息收入
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
func = lambda x: (x[0] + x[1] - x[2]) if x[1] is not None and x[2] is not None else (x[0] + x[3] if x[3] is not None else None)
management['EBIT'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBIT']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
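    # Illustrative note (added; the numbers are made up): with total_profit=100,
    # interest_expense=20 and interest_income=5, EBIT = 100 + 20 - 5 = 115; if either
    # interest figure is missing the method falls back to total_profit + financial_expense.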
@staticmethod
def EBITDA(tp_derivation, factor_derivation, dependencies=['total_profit', 'income_tax'],
dependency=['EBIT']):
"""
:name: 息前税后利润(MRQ)
:desc: 息前税后利润(MRQ)=EBIT(反推法)*(if 所得税&利润总额都>0,则1-所得税率,否则为1),所得税税率 = 所得税/ 利润总额
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: None if x[0] is None or x[1] is None or x[2] is None or x[1] == 0 else (x[0] * (1 - x[2] / x[1]) if x[1] > 0 and x[2] > 0 else x[0])
management['EBITDA'] = management[dependency].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['EBITDA']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
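    # Illustrative note (added; the numbers are made up): with EBIT=120, total_profit=100 and
    # income_tax=25, the effective tax rate is 25%, so the factor is 120 * (1 - 0.25) = 90;
    # if total_profit or income_tax is non-positive the EBIT value is passed through
    # (a zero or missing total_profit yields None).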
@staticmethod
def DepAndAmo(tp_derivation, factor_derivation, dependencies=['fixed_assets_depreciation',
'intangible_assets_amortization',
'defferred_expense_amortization']):
"""
:name: 折旧和摊销(MRQ)
:desc: 固定资产折旧 + 无形资产摊销 + 长期待摊费用摊销
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['DepAndAmo'] = management[dependencies].apply(func, axis=1)
# management = management.drop(dependencies, axis=1)
management = management[['DepAndAmo']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFF(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'cash_equivalents',
'total_current_liability',
'shortterm_loan',
'shortterm_bonds_payable',
'non_current_liability_in_one_year',
'total_current_assets_pre',
'cash_equivalents_pre',
'total_current_liability_pre',
'shortterm_loan_pre',
'shortterm_bonds_payable_pre',
'non_current_liability_in_one_year_pre',
'fix_intan_other_asset_acqui_cash',
],
dependency=['EBITDA', 'DepAndAmo']):
"""
:name: 企业自由现金流量(MRQ)
:desc: 息前税后利润+折旧与摊销-营运资本增加-资本支出 = 息前税后利润+ 折旧与摊销-营运资本增加-构建固定无形和长期资产支付的现金, 营运资本 = 流动资产-流动负债, 营运资金=(流动资产-货币资金)-(流动负债-短期借款-应付短期债券-一年内到期的长期借款-一年内到期的应付债券)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
        # Working-capital change is current-period working capital minus the prior period
        # (the "_pre" columns); capex is the cash paid for fixed, intangible and other
        # long-term assets, matching the formula given in the docstring above.
        func = lambda x: None if any(v is None for v in x) else (
            x[0] + x[1]
            - (((x[2] - x[3]) - (x[4] - x[5] - x[6] - x[7]))
               - ((x[8] - x[9]) - (x[10] - x[11] - x[12] - x[13])))
            - x[14])
management['FCFF'] = management[dependency].apply(func, axis=1)
management = management[['FCFF']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def FCFE(tp_derivation, factor_derivation, dependencies=['borrowing_repayment',
'cash_from_borrowing',
'cash_from_bonds_issue'],
dependency=['FCFF']):
"""
:name: 股东自由现金流量(MRQ)
:desc: 企业自由现金流量-偿还债务所支付的现金+取得借款收到的现金+发行债券所收到的现金(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management2 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management2, how='outer', on='security_code')
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] + x[2] + x[3] if x[0] is not None and x[1] is not None and \
x[2] is not None and x[3] is not None else None
management['FCFE'] = management[dependency].apply(func, axis=1)
management = management[['FCFE']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NonRecGainLoss(tp_derivation, factor_derivation, dependencies=['np_parent_company_owners', 'np_cut']):
"""
:name: 非经常性损益(MRQ)
:desc: 归属母公司净利润(MRQ) - 扣非净利润(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NonRecGainLoss'] = management[dependencies].apply(func, axis=1)
management = management[['NonRecGainLoss']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetOptInc(tp_derivation, factor_derivation, sw_industry,
dependencies_er=['total_operating_revenue', 'total_operating_cost'],
dependencies_yh=['commission_income', 'net_profit', 'other_business_profits', 'operating_cost'],
dependencies_zq=['commission_income', 'net_profit', 'other_operating_revenue', 'operating_cost'],
dependencies_bx=['operating_revenue', 'operating_cost', 'fair_value_variable_income',
'investment_income', 'exchange_income']):
"""
:name: 经营活动净收益(MRQ)
:desc: 新准则(一般企业):营业总收入-营业总成本"
:unit: 元
:view_dimension: 10000
"""
industry2_set = ['430100', '370100', '410400', '450500', '640500', '510100', '620500', '610200', '330200',
'280400', '620400', '450200', '270500', '610300', '280300', '360300', '410100', '370400',
'280200', '730200', '710200', '720200', '640400', '270300', '110400', '220100', '240300',
'270400', '710100', '420100', '420500', '420400', '370600', '720100', '640200', '220400',
'330100', '630200', '610100', '370300', '410300', '220300', '640100', '490300', '450300',
'220200', '370200', '460200', '420200', '460100', '360100', '620300', '110500', '650300',
'420600', '460300', '720300', '270200', '630400', '410200', '280100', '210200', '420700',
'650200', '340300', '220600', '110300', '350100', '620100', '210300', '240200', '340400',
'240500', '360200', '270100', '230100', '370500', '110100', '460400', '110700', '110200',
'630300', '450400', '220500', '730100', '640300', '630100', '240400', '420800', '650100',
'350200', '620200', '210400', '420300', '110800', '360400', '650400', '110600', '460500',
'430200', '210100', '240100', '250100', '310300', '320200', '310400', '310200', '320100',
'260500', '250200', '450100', '470200', '260200', '260400', '260100', '440200', '470400',
'310100', '260300', '220700', '470300', '470100', '340100', '340200', '230200']
dependencies = list(set(dependencies_er + dependencies_yh + dependencies_bx + dependencies_zq))
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
management = pd.merge(management, sw_industry, how='outer', on='security_code').set_index('security_code')
if len(management) <= 0:
return None
management_tm = pd.DataFrame()
func = lambda x: x[0] + x[1] + x[2] - x[3] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None else None
        # Banks (银行) ['440100', '480100']
management_yh = management[management['industry_code2'].isin(['440100', '480100'])]
management_yh['NetOptInc'] = management_yh[dependencies_yh].apply(func, axis=1)
management_tm = management_tm.append(management_yh)
        # Securities firms (证券) ['440300', '490100']
management_zq = management[management['industry_code2'].isin(['440300', '490100'])]
management_zq['NetOptInc'] = management_zq[dependencies_zq].apply(func, axis=1)
management_tm = management_tm.append(management_zq)
func1 = lambda x: x[0] - x[1] - x[2] - x[3] - x[4] if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None else None
        # Insurance (保险) ['440400', '490200']
management_bx = management[management['industry_code2'].isin(['440400', '490200'])]
management_bx['NetOptInc'] = management_bx[dependencies_bx].apply(func1, axis=1)
management_tm = management_tm.append(management_bx)
func2 = lambda x: None if x[0] is None else (x[0] if x[1] is None else x[0] - x[1])
management_er = management[management['industry_code2'].isin(industry2_set)]
management_er['NetOptInc'] = management_er[dependencies_er].apply(func2, axis=1)
management_tm = management_tm.append(management_er)
dependencies = dependencies + ['industry_code2']
management_tm = management_tm[['NetOptInc']]
factor_derivation = pd.merge(factor_derivation, management_tm, how='outer', on="security_code")
return factor_derivation
@staticmethod
def WorkingCap(tp_derivation, factor_derivation, dependencies=['total_current_assets',
'total_current_liability']):
"""
:name: 运营资本(MRQ)
:desc: 流动资产(MRQ)-流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['WorkingCap'] = management[dependencies].apply(func, axis=1)
management = management[['WorkingCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TangibleAssets(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners',
'intangible_assets',
'development_expenditure',
'good_will',
'long_deferred_expense',
'deferred_tax_assets']):
"""
:name: 有形资产(MRQ)
:desc: 股东权益(不含少数股东权益)- (无形资产 + 开发支出 + 商誉 + 长期待摊费用 + 递延所得税资产)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - (x[1] + x[2] + x[3] + x[4] + x[5]) if x[0] is not None and \
x[1] is not None and \
x[2] is not None and \
x[3] is not None and \
x[4] is not None and \
x[5] is not None else None
management['TangibleAssets'] = management[dependencies].apply(func, axis=1)
management = management[['TangibleAssets']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def RetainedEarnings(tp_derivation, factor_derivation, dependencies=['surplus_reserve_fund',
'retained_profit']):
"""
:name: 留存收益(MRQ)
:desc: 盈余公积MRQ + 未分配利润MRQ
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['RetainedEarnings'] = management[dependencies].apply(func, axis=1)
management = management[['RetainedEarnings']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeCurLb(tp_derivation, factor_derivation, dependencies=['bill_receivable',
'accounts_payable',
'advance_peceipts',
'salaries_payable',
'taxs_payable',
'accrued_expenses',
'other_payable',
'long_term_deferred_income',
'other_current_liability',
]):
"""
:name: 无息流动负债(MRQ)
:desc: 无息流动负债 = 应收票据+应付帐款+预收款项+应付职工薪酬+应交税费+其他应付款+预提费用+递延收益+其他流动负债
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] + x[2] + x[3] + x[4] + x[5] + x[6] + x[7] + x[8] if x[0] is not None or \
x[1] is not None or \
x[2] is not None or \
x[3] is not None or \
x[4] is not None or \
x[5] is not None or \
x[6] is not None or \
x[7] is not None or \
x[8] is not None else None
management['InterestFreeCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestFreeNonCurLb(tp_derivation, factor_derivation, dependencies=['total_non_current_liability',
'longterm_loan',
'bonds_payable']):
"""
:name: 无息非流动负债(MRQ)
:desc: 非流动负债合计 - 长期借款 - 应付债券
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and x[1] is not None and x[2] is not None else None
management['InterestFreeNonCurLb'] = management[dependencies].apply(func, axis=1)
management = management[['InterestFreeNonCurLb']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def InterestBearingLiabilities(tp_derivation, factor_derivation, dependencies=['total_liability'],
dependency=['InterestFreeCurLb', 'InterestFreeNonCurLb']):
"""
:name: 带息负债(MRQ)
:desc: 负债合计-无息流动负债-无息非流动负债(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependencies + dependency
func = lambda x: x[0] - x[1] - x[2] if x[0] is not None and \
x[1] is not None and \
x[2] is not None else None
management['InterestBearingLiabilities'] = management[dependency].apply(func, axis=1)
management = management[['InterestBearingLiabilities']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def NetDebt(tp_derivation, factor_derivation, dependencies=['cash_equivalents'],
dependency=['InterestBearingLiabilities']):
"""
:name: 净债务(MRQ)
:desc: 净债务 = 带息债务(MRQ) - 货币资金(MRQ)。 其中,带息负债 = 短期借款 + 一年内到期的长期负债 + 长期借款 + 应付债券 + 应付利息
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
if len(management) <= 0:
return None
dependency = dependency + dependencies
func = lambda x: x[0] - x[1] if x[0] is not None and x[1] is not None else None
management['NetDebt'] = management[dependency].apply(func, axis=1)
management = management[['NetDebt']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def EquityPC(tp_derivation, factor_derivation, dependencies=['equities_parent_company_owners']):
"""
:name: 归属于母公司的股东权益(MRQ)
:desc: 归属于母公司的股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
management = management.rename(columns={'equities_parent_company_owners': 'EquityPC'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalInvestedCap(tp_derivation, factor_derivation, dependencies=['total_owner_equities' ],
dependency=['InterestBearingLiabilities']):
"""
:name: 全部投入资本(MRQ)
:desc: 股东权益+(负债合计-无息流动负债-无息长期负债)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management1 = factor_derivation.loc[:, dependency]
management = pd.merge(management, management1, how='outer', on="security_code")
management = management.fillna(0)
dependency = dependency + dependencies
if len(management) <= 0:
return None
func = lambda x: x[0] + x[1] if x[0] is not None and x[1] is not None else None
management['TotalInvestedCap'] = management[dependency].apply(func, axis=1)
management = management[['TotalInvestedCap']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalAssets(tp_derivation, factor_derivation, dependencies=['total_assets']):
"""
:name: 资产总计(MRQ)
:desc: 资产总计(MRQ) balance
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_assets': 'TotalAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalFixedAssets(tp_derivation, factor_derivation, dependencies=['total_fixed_assets_liquidation']):
"""
:name: 固定资产合计(MRQ)
:desc: 固定资产合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_fixed_assets_liquidation': 'TotalFixedAssets'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalLib(tp_derivation, factor_derivation, dependencies=['total_liability']):
"""
:name: 负债合计(MRQ)
:desc: 负债合计(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_liability': 'TotalLib'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def ShEquity(tp_derivation, factor_derivation, dependencies=['total_owner_equities']):
"""
:name: 股东权益(MRQ)
:desc: 股东权益(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_owner_equities': 'ShEquity'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def CashAndCashEqu(tp_derivation, factor_derivation, dependencies=['cash_and_equivalents_at_end']):
"""
:name: 期末现金及现金等价物(MRQ)
:desc: 期末现金及现金等价物(MRQ)
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'cash_and_equivalents_at_end': 'CashAndCashEqu'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue']):
"""
:name: 营业总收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_revenue': 'SalesTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def TotalOptCostTTM(tp_derivation, factor_derivation, dependencies=['total_operating_cost']):
"""
:name: 营业总成本(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业总成本”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'total_operating_cost': 'TotalOptCostTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def OptIncTTM(tp_derivation, factor_derivation, dependencies=['operating_revenue']):
"""
:name: 营业收入(TTM)
:desc: 根据截止指定日已披露的最新报告期“营业收入”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
management = management.rename(columns={'operating_revenue': 'OptIncTTM'})
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def GrossMarginTTM(tp_derivation, factor_derivation, dependencies=['total_operating_revenue',
'total_operating_cost']):
"""
:name: 毛利(TTM) 营业毛利润
:desc: 根据截止指定日已披露的最新报告期“毛利”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
management = management.fillna(0)
if len(management) <= 0:
return None
func = lambda x: (x[0] - x[1]) / x[1] if x[1] != 0 and x[1] is not None else None
management['GrossMarginTTM'] = management[dependencies].apply(func, axis=1)
management = management[['GrossMarginTTM']]
factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
return factor_derivation
@staticmethod
def SalesExpensesTTM(tp_derivation, factor_derivation, dependencies=['sale_expense']):
"""
:name: 销售费用(TTM)
:desc: 根据截止指定日已披露的最新报告期“销售费用”计算:(1)最新报告期是年报。则TTM=年报;(2)最新报告期不是年报,Q则TTM=本期+(上年年报-上年同期合并数),如果上年年报非空,本期、上年同期台并数存在空值,则返回上年年报。
:unit: 元
:view_dimension: 10000
"""
management = tp_derivation.loc[:, dependencies]
if len(management) <= 0:
return None
        management = management.rename(columns={'sale_expense': 'SalesExpensesTTM'})
        factor_derivation = pd.merge(factor_derivation, management, how='outer', on="security_code")
        return factor_derivation
import datetime
import re
from itertools import islice
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from dateutil.parser import parse as d
from utils_pandas import daterange
from utils_pandas import export
from utils_scraping import any_in
from utils_scraping import camelot_cache
from utils_scraping import get_next_number
from utils_scraping import get_next_numbers
from utils_scraping import logger
from utils_scraping import MAX_DAYS
from utils_scraping import NUM_OR_DASH
from utils_scraping import pairwise
from utils_scraping import parse_file
from utils_scraping import parse_numbers
from utils_scraping import seperate
from utils_scraping import split
from utils_scraping import strip
from utils_scraping import USE_CACHE_DATA
from utils_scraping import web_files
from utils_thai import file2date
from utils_thai import find_thai_date
from utils_thai import get_province
from utils_thai import join_provinces
from utils_thai import parse_gender
from utils_thai import today
def briefing_case_detail_lines(soup):
parts = soup.find_all('p')
parts = [c for c in [c.strip() for c in [c.get_text() for c in parts]] if c]
maintitle, parts = seperate(parts, lambda x: "วันที่" in x)
if not maintitle or "ผู้ป่วยรายใหม่ประเทศไทย" not in maintitle[0]:
return
# footer, parts = seperate(parts, lambda x: "กรมควบคุมโรค กระทรวงสาธารณสุข" in x)
table = list(split(parts, re.compile(r"^\w*[0-9]+\.").match))
if len(table) == 2:
# titles at the end
table, titles = table
table = [titles, table]
else:
table.pop(0)
    # if there is only one table we can use camelot to get it; slower, but fewer problems
# ctable = camelot.read_pdf(file, pages="6", process_background=True)[0].df
for titles, cells in pairwise(table):
title = titles[0].strip("(ต่อ)").strip()
header, cells = seperate(cells, re.compile("ลักษณะผู้ติดเชื้อ").search)
# "อยู่ระหว่างสอบสวน (93 ราย)" on 2021-04-05 screws things up as its not a province
# have to use look behind
thai = r"[\u0E00-\u0E7Fa-zA-Z'. ]+[\u0E00-\u0E7Fa-zA-Z'.]"
not_prov = r"(?<!อยู่ระหว่างสอบสวน)(?<!ยู่ระหว่างสอบสวน)(?<!ระหว่างสอบสวน)"
provish = f"{thai}{not_prov}"
nl = " *\n* *"
nu = "(?:[0-9]+)"
is_pcell = re.compile(rf"({provish}(?:{nl}\({provish}\))?{nl}\( *{nu} *ราย *\))")
lines = pairwise(islice(is_pcell.split("\n".join(cells)), 1, None)) # because can be split over <p>
yield title, lines
def briefing_case_detail(date, pages):
num_people = re.compile(r"([0-9]+) *ราย")
totals = dict() # groupname -> running total
all_cells = {}
rows = []
if date <= d("2021-02-26"): # missing 2nd page of first lot (1.1)
pages = []
for soup in pages:
for title, lines in briefing_case_detail_lines(soup):
if "ติดเชื้อจากต่างประเทศ" in title: # imported
continue
elif "การคัดกรองเชิงรุก" in title:
case_type = "Proactive"
elif "เดินทางมาจากต่างประเทศ" in title:
# case_type = "Quarantine"
continue # just care about province cases for now
# if re.search("(จากระบบเฝ้าระวัง|ติดเชื้อในประเทศ)", title):
else:
case_type = "Walkin"
all_cells.setdefault(title, []).append(lines)
# print(title,case_type)
for prov_num, line in lines:
# for prov in provs: # TODO: should really be 1. make split only split 1.
# TODO: sometimes cells/data separated by "-" 2021-01-03
prov, num = prov_num.strip().split("(", 1)
prov = get_province(prov)
num = int(num_people.search(num).group(1))
totals[title] = totals.get(title, 0) + num
_, rest = get_next_numbers(line, "(?:nผล|ผลพบ)") # "result"
asym, rest = get_next_number(
rest,
"(?s)^.*(?:ไม่มีอาการ|ไมมี่อาการ|ไม่มีอาการ)",
default=0,
remove=True
)
sym, rest = get_next_number(
rest,
"(?s)^.*(?<!(?:ไม่มี|ไมมี่|ไม่มี))(?:อาการ|อาการ)",
default=0,
remove=True
)
unknown, _ = get_next_number(
rest,
"อยู่ระหว่างสอบสวนโรค",
# "อยู่ระหว่างสอบสวน",
"อยู่ระหว่างสอบสวน",
"อยู่ระหว่างสอบสวน",
"ไม่ระบุ",
default=0)
# unknown2 = get_next_number(
# rest,
# "อยู่ระหว่างสอบสวน",
# "อยู่ระหว่างสอบสวน",
# default=0)
# if unknown2:
# unknown = unknown2
# TODO: if 1, can be by itself
if asym == 0 and sym == 0 and unknown == 0:
sym, asym, unknown = None, None, None
else:
assert asym + sym + unknown == num
rows.append((date, prov, case_type, num, asym, sym))
# checksum on title totals
for title, total in totals.items():
m = num_people.search(title)
if not m:
continue
if date in [d("2021-03-19")]: # 1.1 64!=56
continue
assert total == int(m.group(1)), f"group total={total} instead of: {title}\n{all_cells[title]}"
df = pd.DataFrame(
rows,
columns=["Date", "Province", "Case Type", "Cases", "Cases Asymptomatic", "Cases Symptomatic"]
).set_index(['Date', 'Province'])
return df
def briefing_case_types(date, pages, url):
rows = []
vac_rows = []
if date < d("2021-02-01"):
pages = []
for i, soup in enumerate(pages):
text = soup.get_text()
if "รายงานสถานการณ์" not in text:
continue
# cases = get_next_number(text, "ติดเชื้อจาก", before=True)
# walkins = get_next_number(text.split("รายผู้ที่เดิน")[0], "ในประเทศ", until="ราย")
# quarantine = get_next_number(text, "ต่างประเทศ", until="ราย", default=0)
if date == d("2021-05-17"):
numbers, rest = get_next_numbers(text.split("อาการหนัก")[1], "ในประเทศ")
local, cases, imported, prison, walkins, proactive, imported2, prison2, *_ = numbers
assert local == walkins + proactive
assert imported == imported2
assert prison == prison2
else:
numbers, rest = get_next_numbers(text, "รวม", until="รายผู้ที่เดิน")
cases, walkins, proactive, *quarantine = numbers
domestic = get_next_number(rest, "ในประเทศ", return_rest=False, until="ราย")
if domestic and date not in [d("2021-11-22"), d("2021-12-02"), d("2021-12-29")]:
assert domestic <= cases
assert domestic == walkins + proactive
quarantine = quarantine[0] if quarantine else 0
ports, _ = get_next_number(
text,
"ช่องเส้นทางธรรมชาติ",
"รายผู้ที่เดินทางมาจากต่างประเทศ",
before=True,
default=0
)
imported = ports + quarantine
prison, _ = get_next_number(text.split("รวม")[1], "ที่ต้องขัง", default=0, until="ราย")
cases2 = get_next_number(rest, r"\+", return_rest=False, until="ราย")
if cases2 is not None and cases2 != cases:
# Total cases moved to the bottom
# cases == domestic
cases = cases2
assert cases == domestic + imported + prison
if date not in [d("2021-11-01")]:
assert cases == walkins + proactive + imported + prison, f"{date}: briefing case types don't match"
# hospitalisations
hospital, field, severe, respirator, hospitalised = [np.nan] * 5
numbers, rest = get_next_numbers(text, "อาการหนัก")
if numbers:
severe, respirator, *_ = numbers
hospital, _ = get_next_number(text, "ใน รพ.")
field, _ = get_next_number(text, "รพ.สนาม")
num, _ = get_next_numbers(text, "ใน รพ.", before=True)
hospitalised = num[0]
assert hospital + field == hospitalised or date in [d("2021-09-04")]
elif "ผู้ป่วยรักษาอยู่" in text:
hospitalised, *_ = get_next_numbers(text, "ผู้ป่วยรักษาอยู่", return_rest=False, before=True)
if date > d("2021-03-31"): # don't seem to add up before this
hospital, *_ = get_next_numbers(text, "ใน รพ.", return_rest=False, until="ราย")
field, *_ = get_next_numbers(text, "รพ.สนาม", return_rest=False, until="ราย")
assert hospital + field == hospitalised
if date < d("2021-05-18"):
recovered, _ = get_next_number(text, "(เพ่ิมขึ้น|เพิ่มขึ้น)", until="ราย")
else:
# 2021-05-18 Using single infographic with 3rd wave numbers?
numbers, _ = get_next_numbers(text, "หายป่วยแล้ว", "หายป่วยแลว้")
cum_recovered_3rd, recovered, *_ = numbers
if cum_recovered_3rd < recovered:
recovered = cum_recovered_3rd
assert not pd.isna(recovered)
deaths, _ = get_next_number(text, "เสียชีวิตสะสม", "เสียชีวติสะสม", "เสียชีวติ", before=True)
assert not any_in([None], cases, walkins, proactive, imported, recovered, deaths)
if date > d("2021-04-23"):
assert not any_in([None], hospital, field, severe, respirator, hospitalised)
# cases by region
# bangkok, _ = get_next_number(text, "กรุงเทพฯ และนนทบุรี")
# north, _ = get_next_number(text, "ภาคเหนือ")
# south, _ = get_next_number(text, "ภาคใต้")
# east, _ = get_next_number(text, "ภาคตะวันออก")
# central, _ = get_next_number(text, "ภาคกลาง")
# all_regions = north+south+east+central
# assert hospitalised == all_regions, f"Regional hospitalised {all_regions} != {hospitalised}"
rows.append([
date,
cases,
walkins,
proactive,
imported,
prison,
hospital,
field,
severe,
respirator,
hospitalised,
recovered,
deaths,
url,
])
break
df = pd.DataFrame(rows, columns=[
"Date",
"Cases",
"Cases Walkin",
"Cases Proactive",
"Cases Imported",
"Cases Area Prison", # Keep as Area so we don't repeat number.
"Hospitalized Hospital",
"Hospitalized Field",
"Hospitalized Severe",
"Hospitalized Respirator",
"Hospitalized",
"Recovered",
"Deaths",
"Source Cases",
]).set_index(['Date'])
if not df.empty:
logger.info("{} Briefing Cases: {}", date.date(), df.to_string(header=False, index=False))
return df
def briefing_province_cases(file, date, pages):
# TODO: also can be got from https://ddc.moph.go.th/viralpneumonia/file/scoreboard/scoreboard_02062564.pdf
# Seems updated around 3pm so perhaps not better than briefing
if date < d("2021-01-13"):
pages = []
rows = {}
for i, soup in enumerate(pages):
text = str(soup)
if "รวมท ัง้ประเทศ" in text:
continue
if not re.search(r"(?:ที่|ที)#?\s*(?:จังหวัด|จงัหวดั)", text): # 'ที# จงัหวดั' 2021-10-17
continue
if not re.search(r"(นวนผู้ติดเชื้อโควิดในประเทศรำยใหม่|อโควิดในประเทศรายให)", text):
continue
parts = [p.get_text() for p in soup.find_all("p")]
parts = [line for line in parts if line]
preamble, *tables = split(parts, re.compile(r"รวม\s*\((?:ราย|รำย)\)").search)
if len(tables) <= 1:
continue # Additional top 10 report. #TODO: better detection of right report
else:
title, parts = tables
while parts and "รวม" in parts[0]:
# get rid of totals line at the top
totals, *parts = parts
# First line might be several
totals, *more_lines = totals.split("\n", 1)
parts = more_lines + parts
parts = [c.strip() for c in NUM_OR_DASH.split("\n".join(parts)) if c.strip()]
while True:
if len(parts) < 9:
# TODO: can be number unknown cases - e.g. หมายเหตุ : รอสอบสวนโรค จานวน 337 ราย
break
if NUM_OR_DASH.search(parts[0]):
linenum, prov, *parts = parts
else:
# for some reason the line number doesn't show up? but it's there in the pdf...
break
numbers, parts = parts[:9], parts[9:]
thai = prov.strip().strip(" ี").strip(" ์").strip(" ิ")
if thai in ['กทม. และปรมิ ณฑล', 'รวมจงัหวดัอนื่ๆ(']:
# bangkok + suburbs, rest of thailand
break
prov = get_province(thai)
numbers = parse_numbers(numbers)
numbers = numbers[1:-1] # last is total. first is previous days
assert len(numbers) == 7
for i, cases in enumerate(reversed(numbers)):
if i > 4: # 2021-01-11 they use earlier cols for date ranges
break
olddate = date - datetime.timedelta(days=i)
if (olddate, prov) not in rows:
rows[(olddate, prov)] = cases
else:
# TODO: apparently 2021-05-13 had to merge two lines but why?
# assert (olddate, prov) not in rows, f"{prov} twice in prov table line {linenum}"
pass # if duplicate we will catch it below
# if False and olddate == date:
# if cases > 0:
# print(date, linenum, thai, PROVINCES["ProvinceEn"].loc[prov], cases)
# else:
# print("no cases", linenum, thai, *numbers)
data = ((d, p, c) for (d, p), c in rows.items())
df = pd.DataFrame(data, columns=["Date", "Province", "Cases"]).set_index(["Date", "Province"])
assert date >= d(
"2021-01-13") and not df.empty, f"Briefing on {date} failed to parse cases per province"
if date > d("2021-05-12") and date not in [d("2021-07-18")]:
# TODO: 2021-07-18 has only 76 prov. not sure why yet. maybe doubled up or mispelled names?
assert len(df.groupby("Province").count()) in [77, 78], f"Not enough provinces briefing {date}"
return df
def briefing_deaths_provinces(dtext, date, file):
if not deaths_title_re.search(dtext):
return | pd.DataFrame(columns=["Date", "Province"]) | pandas.DataFrame |
import os
import pandas as pd
import streamlit.components.v1 as components
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
assert os.path.exists(build_dir)
_component_func = components.declare_component("st_datatable", path=build_dir)
def st_datatable(data, title: str = "", key=None):
component_value = _component_func(data=data, title=title, key=key, default= | pd.DataFrame() | pandas.DataFrame |
import logging
import pandas as pd
from datetime import timedelta, date
from ..models import Index, Quote, Quote_CSI300, Hvlc_report, Ublb_cross,\
Hvlc_strategy, Hvlc_report_history
from ..utils.utils import gen_id, latest_over_rsi70
logger = logging.getLogger('main.hvlc')
def hvlc_report(sdic):
# Create Hvlc_report table
s_l = sdic['learning']
Hvlc_report.__table__.create(s_l.get_bind(), checkfirst=True)
Hvlc_strategy.__table__.create(s_l.get_bind(), checkfirst=True)
Hvlc_report_history.__table__.create(s_l.get_bind(), checkfirst=True)
for dbname, s in sdic.items():
if dbname in ('testing','tsxci','nasdaq100','sp100','csi300','eei'):
logger.info("Start to process: %s" % dbname)
# tickers = [r.symbol for r in s.query(Index.symbol).distinct()]
# Get UBLB report
ublb_cross = pd.read_sql(s_l.query(Ublb_cross).
filter(Ublb_cross.index == dbname).statement, s_l.bind)
ublb_tickers = ublb_cross['symbol'].tolist()
# Get existing HVLC tickers list:
hvlc_r = pd.read_sql(s_l.query(Hvlc_report).
filter(Hvlc_report.index == dbname).statement, s_l.bind)
hvlc_tickers = hvlc_r['symbol'].tolist()
# Get Hvlc strategy
hvlc_strategy = s_l.query(Hvlc_strategy).one()
vp_ratio_low = hvlc_strategy.vp_ratio_low
vp_ratio_high = hvlc_strategy.vp_ratio_high
vr_low = hvlc_strategy.vr_low
vr_high = hvlc_strategy.vr_high
pr_low = hvlc_strategy.pr_low
pr_high = hvlc_strategy.pr_high
# ---- HVLC report -------Iterate UBLB cross tickers----------------
for ticker in ublb_tickers:
if dbname == 'csi300':
df = pd.read_sql(s.query(Quote_CSI300).\
filter(Quote_CSI300.symbol == ticker).\
statement, s.bind, index_col='date').sort_index()
else:
df = pd.read_sql(s.query(Quote).\
filter(Quote.symbol == ticker).\
statement, s.bind, index_col='date').sort_index()
try:
# Latest
df = df[(df != 0).all(1)]
df = df.sort_index(ascending=True).last('2y').drop(columns=['id'])
# find reached date in ublb cross
reached_date = ublb_cross.loc[ublb_cross['symbol'] == ticker]['date'].iloc[-1]
# Latest day info
df_latest = df.iloc[-1]
latest_date = df_latest.name
latest_close = df_latest['close']
latest_open = df_latest['open']
latest_high = df_latest['high']
latest_low = df_latest['low']
over_rsi70 = latest_over_rsi70(df)
# avg_vol = average_volume(df,90)
# Volume and Price change in percentage
# df['volchg'] = round((df['volume']/avg_vol -1)*100,2)
# df['pricechg'] = round(abs((df['close']/df['open'] -1)*100),2)
# No price change, reset to 0.01 in order to silence noise
# df.loc[df['pricechg'] == 0, 'pricechg'] = 0.01
# df['vol_price_ratio'] = round(df['volchg']/df['pricechg'],2)
vp_rank_ratio, v_rank, p_rank = _get_vp_rank(df, 99)
# Slice range after reached date
df_after_reached = df.loc[reached_date:]
high_date = df_after_reached['high'].idxmax()
low_date = df_after_reached['low'].idxmin()
high_price = df_after_reached['high'].max()
low_price = df_after_reached['low'].min()
lowest_close_date = df_after_reached['close'].idxmin()
# Latest v/p ratio
# latest_vol_price_ratio = df_after_reached.iloc[-1]['vol_price_ratio']
# Today can't be highest/lowest and lowest close date in reached range and should be in range of strategy
if (latest_close < high_price and latest_close > low_price
and latest_date > low_date and latest_date > high_date
and latest_date > lowest_close_date and over_rsi70 != True
# HVLC Strategy in Learning db
and ((vp_ratio_low <= vp_rank_ratio <= vp_ratio_high and vr_low <= v_rank <= vr_high)
or (vr_low <= v_rank <= vr_high and pr_low <= p_rank <= pr_high))
):
hvlc_date = df_after_reached.iloc[-1]
try:
# Remove existing record
s_l.query(Hvlc_report).filter(Hvlc_report.symbol == ticker,
Hvlc_report.index == dbname).delete()
s_l.commit()
record = {'id': gen_id(ticker+dbname+str(reached_date)+str(latest_date)),
'date': latest_date,
'reached_date': reached_date,
'index': dbname,
'symbol': ticker,
'volchg': v_rank,
'pricechg' : p_rank,
'vol_price_ratio' : vp_rank_ratio
}
s_l.add(Hvlc_report(**record))
s_l.commit()
logger.info("HVLC found - (%s, %s)" % (dbname, ticker))
except:
pass
except:
pass
# -------------------- HVLC remove ----------------------------------
for ticker in hvlc_tickers:
if dbname == 'csi300':
df = pd.read_sql(s.query(Quote_CSI300).\
filter(Quote_CSI300.symbol == ticker).\
statement, s.bind, index_col='date').sort_index()
else:
df = pd.read_sql(s.query(Quote).\
filter(Quote.symbol == ticker).\
statement, s.bind, index_col='date').sort_index()
# quote df
df = df[(df != 0).all(1)]
df = df.sort_index(ascending=True).last('52w').drop(columns=['id'])
over_rsi70 = latest_over_rsi70(df)
# Remove
hvlc_r = s_l.query(Hvlc_report).filter(Hvlc_report.symbol == ticker,
Hvlc_report.index == dbname).first()
df_latest = df.iloc[-1]
latest_close = df_latest['close'].item()
hvlc_close = df.loc[hvlc_r.date]['close']
chg_since_hvlc = round((latest_close/hvlc_close-1)*100,2)
# Clean up if rsi >= 70 in recent min_period
if over_rsi70 == True:
rsi_now = 70
# HVLC record
record = s_l.query(Hvlc_report).filter(Hvlc_report.symbol == ticker,
Hvlc_report.index == dbname).first()
# HVLC history record
his_record = {'id': gen_id(record.symbol+record.index+str(date.today())),
                                  'index': record.index,
                                  'symbol': record.symbol,
'delete_date':date.today(),
'hvlc_date':record.date,
'reached_date':record.reached_date,
'volchg':record.volchg,
'pricechg':record.pricechg,
'vol_price_ratio':record.vol_price_ratio,
'record_rsi':rsi_now}
try:
if record:
s_l.delete(record)
s_l.commit()
logger.info("HVLC deleted - (%s, %s)" % (dbname, ticker))
s_l.add(Hvlc_report_history(**his_record))
s_l.commit()
logger.info("HVLC history added - (%s, %s)" % (dbname, ticker))
except:
pass
elif chg_since_hvlc <= -10:
rsi_now = 30
# HVLC record
record = s_l.query(Hvlc_report).filter(Hvlc_report.symbol == ticker,
Hvlc_report.index == dbname).first()
# HVLC history record
his_record = {'id': gen_id(record.symbol+record.index+str(date.today())),
                                  'index': record.index,
                                  'symbol': record.symbol,
'delete_date':date.today(),
'hvlc_date':record.date,
'reached_date':record.reached_date,
'volchg':record.volchg,
'pricechg':record.pricechg,
'vol_price_ratio':record.vol_price_ratio,
'record_rsi':rsi_now}
try:
if record:
s_l.delete(record)
s_l.commit()
logger.info("HVLC deleted - (%s, %s)" % (dbname, ticker))
s_l.add(Hvlc_report_history(**his_record))
s_l.commit()
logger.info("HVLC history added - (%s, %s)" % (dbname, ticker))
except:
pass
def average_volume(df, span):
    # restrict to the most recent 6 months before taking the rolling mean
    recent = df.sort_index(ascending=True).last("6M")
    avg_vol = recent['volume'].rolling(window=span).mean()
    return avg_vol
# Calculate ranking of volume and price and volume rank / price rank
def _get_vp_rank(df, n_days):
try:
# Calculate standardlize volume
df['volume_rank'] = df['volume'].rolling(n_days + 1).apply(lambda x: pd.Series(x).rank().iloc[-1], raw=True)
df['pricechg'] = round(abs((df['close']/df['open'] -1)*100),2)
df['pricechg_rank'] = df['pricechg'].rolling(n_days + 1).apply(lambda x: | pd.Series(x) | pandas.Series |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utilities functions to manipulate the data in the colab."""
import datetime
import itertools
import operator
from typing import List, Optional
import dataclasses
import numpy as np
import pandas as pd
import pandas.io.formats.style as style
from scipy import stats
from trimmed_match.design import common_classes
TimeWindow = common_classes.TimeWindow
FormatOptions = common_classes.FormatOptions
_operator_functions = {'>': operator.gt,
'<': operator.lt,
'<=': operator.le,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne}
_inverse_op = {'<': '>', '<=': '>=', '>': '<', '>=': '<=', '=': '!='}
@dataclasses.dataclass
class CalculateMinDetectableIroas:
"""Class for the calculation of the minimum detectable iROAS.
Hypothesis testing for H0: iROAS=0 vs H1: iROAS>=min_detectable_iroas based
on one sample X which follows a normal distribution with mean iROAS (unknown)
and standard deviation rmse (known).
Typical usage example:
calc_min_detectable_iroas = CalculateMinDetectableIroas(0.1, 0.9)
min_detectable_iroas = calc_min_detectable_iroas.at(2.0)
"""
# chance of rejecting H0 incorrectly when H0 holds.
significance_level: float = 0.1
# chance of rejecting H0 correctly when H1 holds.
power_level: float = 0.9
# minimum detectable iroas at rmse=1.
rmse_multiplier: float = dataclasses.field(init=False)
def __post_init__(self):
"""Calculates rmse_multiplier.
Raises:
ValueError: if significance_level or power_level is not in (0, 1).
"""
if self.significance_level <= 0 or self.significance_level >= 1.0:
raise ValueError('significance_level must be in (0, 1), but got '
f'{self.significance_level}.')
if self.power_level <= 0 or self.power_level >= 1.0:
raise ValueError('power_level must be in (0, 1), but got '
f'{self.power_level}.')
self.rmse_multiplier = (
stats.norm.ppf(self.power_level) +
stats.norm.ppf(1 - self.significance_level))
def at(self, rmse: float) -> float:
"""Calculates min_detectable_iroas at the specified rmse."""
return rmse * self.rmse_multiplier
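# Quick numeric check of the formula above (illustrative, not part of the original
# module): with the default significance_level=0.1 and power_level=0.9,
# rmse_multiplier = norm.ppf(0.9) + norm.ppf(0.9) ~= 1.2816 + 1.2816 = 2.5631,
# so CalculateMinDetectableIroas().at(2.0) returns roughly 5.13.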
def find_days_to_exclude(
dates_to_exclude: List[str]) -> List[TimeWindow]:
"""Returns a list of time windows to exclude from a list of days and weeks.
Args:
dates_to_exclude: a List of strings with format indicating a single day as
'2020/01/01' (YYYY/MM/DD) or an entire time period as
'2020/01/01 - 2020/02/01' (indicating start and end date of the time period)
Returns:
days_exclude: a List of TimeWindows obtained from the list in input.
"""
days_exclude = []
for x in dates_to_exclude:
tmp = x.split('-')
if len(tmp) == 1:
try:
days_exclude.append(
TimeWindow(pd.Timestamp(tmp[0]), | pd.Timestamp(tmp[0]) | pandas.Timestamp |
import logging
import pandas as pd
from configchecker import ConfigChecker
import whalealert.settings as settings
log = logging.getLogger(__name__)
class Writer():
""" Puslishing Whale Alert API status and database results"""
def __init__(self, status, database):
self.__status = status
self.__database = database
self.__health_list = [1] * settings.health_list_length
self.__last_written = []
if status is not None:
log.debug("Pulisher started with initial status {}".format(status.get_expectations()))
def get_status_object(self):
""" Return the status object used"""
return self.__status
def get_database(self):
"""Return the database object used"""
return self.__database
def write_transactions(self, transactions):
"""
        Write transactions to the database
        Parameters:
            transactions (dict): Transactions returned by api.get_trasactions
        Returns:
            True: Transactions were written successfully
            False: An error occurred, status written to logs.
"""
if len(transactions) == 0:
log.debug("Trying to write an empty list of transactions")
return False
for transaction in transactions:
try:
transaction = self.__squash_dictionary(transaction)
self.__create_tables_as_needed(transaction)
except KeyError:
log.error("Key error parsing keys from transaction {}".format(transaction))
return False
except Exception as e:
log.error("Exception {} when parsing keys from transaction {}".format(e, transaction))
return False
if self.__add_entries(transaction) is False:
log.error("Failed to add entriy to database. Entry {}".format(transaction))
return False
return True
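    # Typical flow (illustrative): each transaction dict is flattened by
    # __squash_dictionary, any tables it needs are created on demand, and the entry is
    # appended; get_last_written() below hands back and clears whatever has accumulated.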
def get_last_written(self):
current_transactions = list(self.__last_written)
self.__last_written.clear()
return | pd.DataFrame(current_transactions) | pandas.DataFrame |
import json
from typing import Tuple, Union
import pandas as pd
import numpy as np
import re
import os
from tableone import TableOne
from collections import defaultdict
from io import StringIO
from .gene_patterns import *
import plotly.express as px
import pypeta
from pypeta import Peta
from pypeta import filter_description
class SampleIdError(RuntimeError):
def __init__(self, sample_id: str, message: str):
self.sample_id = sample_id
self.message = message
class NotNumericSeriesError(RuntimeError):
def __init__(self, message: str):
self.message = message
class UnknowSelectionTypeError(RuntimeError):
def __init__(self, message: str):
self.message = message
class NotInColumnError(RuntimeError):
def __init__(self, message: str):
self.message = message
class GenesRelationError(RuntimeError):
def __init__(self, message: str):
self.message = message
class VariantUndefinedError(RuntimeError):
def __init__(self, message: str):
self.message = message
class ListsUnEqualLengthError(RuntimeError):
def __init__(self, message: str):
self.message = message
class DatetimeFormatError(RuntimeError):
def __init__(self, message: str):
self.message = message
class CDx_Data():
"""[summary]
"""
def __init__(self,
mut_df: pd.DataFrame = None,
cli_df: pd.DataFrame = None,
cnv_df: pd.DataFrame = None,
sv_df: pd.DataFrame = None,
json_str: str = None):
"""Constructor method with DataFrames
Args:
mut_df (pd.DataFrame, optional): SNV and InDel info. Defaults to None.
cli_df (pd.DataFrame, optional): Clinical info. Defaults to None.
cnv_df (pd.DataFrame, optional): CNV info. Defaults to None.
sv_df (pd.DataFrame, optional): SV info. Defaults to None.
"""
self.json_str = json_str
self.mut = mut_df
self.cnv = cnv_df
self.sv = sv_df
if not cli_df is None:
self.cli = cli_df
self.cli = self._infer_datetime_columns()
else:
self._set_cli()
self.crosstab = self.get_crosstab()
def __len__(self):
return 0 if self.cli is None else len(self.cli)
def __getitem__(self, n):
return self.select_by_sample_ids([self.cli.sampleId.iloc[n]])
def __sub__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = None if self.cli is None and cdx.cli is None else pd.concat(
[self.cli, cdx.cli]).drop_duplicates(keep=False)
mut = None if self.mut is None and cdx.mut is None else pd.concat(
[self.mut, cdx.mut]).drop_duplicates(keep=False)
cnv = None if self.cnv is None and cdx.cnv is None else pd.concat(
[self.cnv, cdx.cnv]).drop_duplicates(keep=False)
sv = None if self.sv is None and cdx.sv is None else pd.concat(
[self.sv, cdx.sv]).drop_duplicates(keep=False)
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
def __add__(self, cdx):
if self.cli is None and cdx.cli is None:
return CDx_Data()
cli = pd.concat([self.cli, cdx.cli]).drop_duplicates()
mut = pd.concat([self.mut, cdx.mut]).drop_duplicates()
cnv = pd.concat([self.cnv, cdx.cnv]).drop_duplicates()
sv = pd.concat([self.sv, cdx.sv]).drop_duplicates()
return CDx_Data(cli_df=cli, mut_df=mut, cnv_df=cnv, sv_df=sv)
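    # Set-like semantics of the two operators above (illustrative): for cohorts a and b,
    # `a + b` is the deduplicated union of their cli/mut/cnv/sv tables, while `a - b`
    # keeps only rows not shared by both (pd.concat followed by drop_duplicates(keep=False)).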
def from_PETA(self,
token: str,
json_str: str,
host='https://peta.bgi.com/api'):
"""Retrieve CDx data from BGI-PETA database.
Args:
token (str): Effective token for BGI-PETA database
json_str (str): The json format restrictions communicating to the database
"""
self.json_str = json_str
peta = Peta(token=token, host=host)
peta.set_data_restriction_from_json_string(json_str)
        # peta.fetch_clinical_data() does not process dtype inference correctly, so do it manually.
#self.cli = peta.fetch_clinical_data()
self.cli = pd.read_csv(
StringIO(peta.fetch_clinical_data().to_csv(None, index=False)))
self.mut = peta.fetch_mutation_data()
self.cnv = peta.fetch_cnv_data()
self.sv = peta.fetch_sv_data()
# dedup for the same sampleId in different studyIds, discard the duplicated ones from all tables
cli_original = self.cli
self.cli = self.cli.drop_duplicates('sampleId')
if (len(self.cli) < len(cli_original)):
print('Duplicated sampleId exists, drop duplicates and go on')
undup_tuple = [(x, y)
for x, y in zip(self.cli.sampleId, self.cli.studyId)]
self.sv = self.sv[self.sv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.cnv = self.cnv[self.cnv.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
self.mut = self.mut[self.mut.apply(
lambda x: (x['Tumor_Sample_Barcode'], x['studyId']) in undup_tuple,
axis=1)].drop_duplicates()
# time series
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
return filter_description(json_str)
def filter_description(self):
"""retrun filter description when data load from PETA
Returns:
str: description
"""
return filter_description(self.json_str) if self.json_str else None
def from_file(self,
mut_f: str = None,
cli_f: str = None,
cnv_f: str = None,
sv_f: str = None):
"""Get CDx data from files.
Args:
mut_f (str, optional): File as NCBI MAF format contains SNV and InDel. Defaults to None.
cli_f (str, optional): File name contains clinical info. Defaults to None.
cnv_f (str, optional): File name contains CNV info. Defaults to None.
sv_f (str, optional): File name contains SV info. Defaults to None.
"""
if not mut_f is None:
self.mut = pd.read_csv(mut_f, sep='\t')
if not cnv_f is None:
self.cnv = pd.read_csv(cnv_f, sep='\t')
if not sv_f is None:
self.sv = pd.read_csv(sv_f, sep='\t')
if not cli_f is None:
self.cli = pd.read_csv(cli_f, sep='\t')
else:
self._set_cli()
self.cli = self._infer_datetime_columns()
self.crosstab = self.get_crosstab()
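    # Illustrative call (file names are placeholders that match what to_tsvs() writes):
    #   cdx = CDx_Data()
    #   cdx.from_file(mut_f="mut_info.txt", cli_f="sample_info.txt",
    #                 cnv_f="cnv_info.txt", sv_f="fusion_info.txt")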
def to_tsvs(self, path: str = './'):
"""Write CDx_Data properties to 4 seprated files
Args:
path (str, optional): Path to write files. Defaults to './'.
"""
if not self.cli is None:
self.cli.to_csv(os.path.join(path, 'sample_info.txt'),
index=None,
sep='\t')
if not self.mut is None:
self.mut.to_csv(os.path.join(path, 'mut_info.txt'),
index=None,
sep='\t')
if not self.cnv is None:
self.cnv.to_csv(os.path.join(path, 'cnv_info.txt'),
index=None,
sep='\t')
if not self.sv is None:
self.sv.to_csv(os.path.join(path, 'fusion_info.txt'),
index=None,
sep='\t')
def to_excel(self, filename: str = './output.xlsx'):
"""Write CDx_Data properties to excel file
Args:
filename (str, optional): target filename. Defaults to './output.xlsx'.
"""
if not filename.endswith('xlsx'):
filename = filename + '.xlsx'
with pd.ExcelWriter(filename) as ew:
if not self.cli is None:
self.cli.to_excel(ew, sheet_name='clinical', index=None)
if not self.mut is None:
self.mut.to_excel(ew, sheet_name='mutations', index=None)
if not self.cnv is None:
self.cnv.to_excel(ew, sheet_name='cnv', index=None)
if not self.sv is None:
self.sv.to_excel(ew, sheet_name='sv', index=None)
def _set_cli(self):
"""Set the cli attribute, generate a void DataFrame when it is not specified.
"""
sample_id_series = []
if not self.mut is None:
sample_id_series.append(
self.mut['Tumor_Sample_Barcode'].drop_duplicates())
if not self.cnv is None:
sample_id_series.append(
self.cnv['Tumor_Sample_Barcode'].drop_duplicates())
if not self.sv is None:
sample_id_series.append(
self.sv['Tumor_Sample_Barcode'].drop_duplicates())
if len(sample_id_series) > 0:
self.cli = pd.DataFrame({
'sampleId': | pd.concat(sample_id_series) | pandas.concat |
# Get data
import io
import numpy as np
import pandas as pd
import requests
from coronavirus.utils import fill_dates
def get_data():
country = get_hopkins()
pop_country = get_pop_country()
country = pd.merge(country, pop_country, how="left", on="country_name")
state = get_tracking()
pop_state = get_pop_state()
state = pd.merge(state, pop_state, how="left", on="state_code")
country.columns = [x if x != "country_code" else "code" for x in country.columns]
country.columns = [x if x != "country_name" else "name" for x in country.columns]
state.columns = [x if x != "state_code" else "code" for x in state.columns]
state.columns = [x if x != "state_name" else "name" for x in state.columns]
df = pd.concat([state, country], ignore_index=True)
df = df[["code", "name", "date", "pop", "cases", "deaths"]]
return df
def get_hopkins():
"""Get Johns Hopkins coronavirus cases and deaths data"""
file_cases = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
file_deaths = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv"
cases = pd.read_csv(file_cases)
deaths = pd.read_csv(file_deaths)
col_geo = ["state_name", "country_name", "lat", "long"]
cases.columns = col_geo + cases.columns.tolist()[4:]
deaths.columns = col_geo + deaths.columns.tolist()[4:]
deaths = deaths.iloc[:, [0, 1] + list(range(4, deaths.shape[1]))]
cases = pd.melt(cases, id_vars=col_geo, var_name="date", value_name="cases")
deaths = pd.melt(
deaths,
id_vars=["state_name", "country_name"],
var_name="date",
value_name="deaths",
)
df = pd.merge(cases, deaths, how="left", on=["state_name", "country_name", "date"])
df["date"] = pd.to_datetime(df["date"])
df["country_name"] = fix_country_name(df["country_name"])
agg = {"cases": "sum", "deaths": "sum"}
df = df.groupby(["date", "country_name"]).agg(agg).reset_index()
df = df[["date", "country_name", "cases", "deaths"]]
df = fill_dates(df, "country_name")
return df
def get_tracking():
"""Get Coronavirus tracking data"""
url = "http://covidtracking.com/api/states/daily.csv"
r = requests.get(url)
data = io.StringIO(r.text)
df = | pd.read_csv(data) | pandas.read_csv |
"""Module providing functions to plot data collected during sleep studies."""
import datetime
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Union
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import matplotlib.ticker as mticks
import pandas as pd
import seaborn as sns
from fau_colors import colors_all
from biopsykit.utils.datatype_helper import AccDataFrame, GyrDataFrame, ImuDataFrame, SleepEndpointDict
_sleep_imu_plot_params = {
"background_color": ["#e0e0e0", "#9e9e9e"],
"background_alpha": [0.3, 0.3],
}
_bbox_default = dict(
fc=(1, 1, 1, plt.rcParams["legend.framealpha"]),
ec=plt.rcParams["legend.edgecolor"],
boxstyle="round",
)
def sleep_imu_plot(
data: Union[AccDataFrame, GyrDataFrame, ImuDataFrame],
datastreams: Optional[Union[str, Sequence[str]]] = None,
sleep_endpoints: Optional[SleepEndpointDict] = None,
downsample_factor: Optional[int] = None,
**kwargs,
) -> Tuple[plt.Figure, Iterable[plt.Axes]]:
"""Draw plot to visualize IMU data during sleep, and, optionally, add sleep endpoints information.
Parameters
----------
data : :class:`~pandas.DataFrame`
data to plot. Data must either be acceleration data (:obj:`~biopsykit.utils.datatype_helper.AccDataFrame`),
gyroscope data (:obj:`~biopsykit.utils.datatype_helper.GyrDataFrame`), or IMU data
(:obj:`~biopsykit.utils.datatype_helper.ImuDataFrame`).
datastreams : str or list of str, optional
list of datastreams indicating which type of data should be plotted or ``None`` to only plot acceleration data.
If more than one type of datastream is specified each datastream is plotted row-wise in its own subplot.
Default: ``None``
sleep_endpoints : :obj:`~biopsykit.utils.datatype_helper.SleepEndpointDict`
dictionary with sleep endpoints to add to plot or ``None`` to only plot IMU data.
downsample_factor : int, optional
downsample factor to apply to raw input data before plotting or ``None`` to not downsample data before
plotting (downsample factor 1). Default: ``None``
**kwargs
optional arguments for plot configuration.
To configure which type of sleep endpoint annotations to plot:
* ``plot_sleep_onset``: whether to plot sleep onset annotations or not: Default: ``True``
* ``plot_wake_onset``: whether to plot wake onset annotations or not: Default: ``True``
* ``plot_bed_start``: whether to plot bed interval start annotations or not: Default: ``True``
* ``plot_bed_end``: whether to plot bed interval end annotations or not: Default: ``True``
* ``plot_sleep_wake``: whether to plot vspans of detected sleep/wake phases or not: Default: ``True``
To style general plot appearance:
* ``axs``: pre-existing axes for the plot. Otherwise, a new figure and axes objects are created and
returned.
* ``figsize``: tuple specifying figure dimensions
* ``palette``: color palette to plot different axes from input data
To style axes:
* ``xlabel``: label of x axis. Default: "Time"
* ``ylabel``: label of y axis. Default: "Acceleration :math:`[m/s^2]`" for acceleration data and
"Angular Velocity :math:`[°/s]`" for gyroscope data
To style legend:
* ``legend_loc``: location of legend. Default: "lower left"
* ``legend_fontsize``: font size of legend labels. Default: "smaller"
Returns
-------
fig : :class:`~matplotlib.figure.Figure`
figure object
axs : list of :class:`~matplotlib.axes.Axes`
list of subplot axes objects
"""
axs: List[plt.Axes] = kwargs.pop("ax", kwargs.pop("axs", None))
sns.set_palette(kwargs.get("palette", sns.light_palette(getattr(colors_all, "fau"), n_colors=4, reverse=True)[:-1]))
if datastreams is None:
datastreams = ["acc"]
if isinstance(datastreams, str):
# ensure list
datastreams = [datastreams]
fig, axs = _sleep_imu_plot_get_fig_axs(axs, len(datastreams), **kwargs)
downsample_factor = _sleep_imu_plot_get_downsample_factor(downsample_factor)
if len(datastreams) != len(axs):
raise ValueError(
"Number of datastreams to be plotted must match number of provided subplots! Expected {}, got {}.".format(
len(datastreams), len(axs)
)
)
for ax, ds in zip(axs, datastreams):
_sleep_imu_plot(
data=data,
datastream=ds,
downsample_factor=downsample_factor,
sleep_endpoints=sleep_endpoints,
ax=ax,
**kwargs,
)
fig.tight_layout()
fig.autofmt_xdate(rotation=0, ha="center")
return fig, axs
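# Example call (illustrative only; `imu_data` and `endpoints` stand in for an
# ImuDataFrame and a SleepEndpointDict obtained elsewhere):
#   fig, axs = sleep_imu_plot(imu_data, datastreams=["acc", "gyr"],
#                             sleep_endpoints=endpoints, downsample_factor=10)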
def _sleep_imu_plot_get_fig_axs(axs: List[plt.Axes], nrows: int, **kwargs):
figsize = kwargs.get("figsize", None)
if isinstance(axs, plt.Axes):
# ensure list (if only one Axes object is passed to sleep_imu_plot() instead of a list of Axes objects)
axs = [axs]
if axs is None:
fig, axs = plt.subplots(figsize=figsize, nrows=nrows)
else:
fig = axs[0].get_figure()
if isinstance(axs, plt.Axes):
# ensure list (if nrows == 1 only one axes object will be created, not a list of axes)
axs = [axs]
return fig, axs
def _sleep_imu_plot_get_downsample_factor(downsample_factor: int):
if downsample_factor is None:
downsample_factor = 1
# ensure int
downsample_factor = int(downsample_factor)
if downsample_factor < 1:
raise ValueError("'downsample_factor' must be >= 1!")
return downsample_factor
def _sleep_imu_plot(
data: pd.DataFrame,
datastream: str,
downsample_factor: int,
sleep_endpoints: SleepEndpointDict,
ax: plt.Axes,
**kwargs,
):
legend_loc = kwargs.get("legend_loc", "lower left")
legend_fontsize = kwargs.get("legend_fontsize", "smaller")
ylabel = kwargs.get("ylabel", {"acc": "Acceleration [$m/s^2$]", "gyr": "Angular Velocity [$°/s$]"})
xlabel = kwargs.get("xlabel", "Time")
if isinstance(data.index, pd.DatetimeIndex):
plt.rcParams["timezone"] = data.index.tz.zone
data_plot = data.filter(like=datastream)[::downsample_factor]
data_plot.plot(ax=ax)
if sleep_endpoints is not None:
kwargs.setdefault("ax", ax)
_sleep_imu_plot_add_sleep_endpoints(sleep_endpoints=sleep_endpoints, **kwargs)
if isinstance(data_plot.index, pd.DatetimeIndex):
# TODO add axis style for non-Datetime axes
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M:%S"))
ax.xaxis.set_minor_locator(mticks.AutoMinorLocator(6))
ax.set_ylabel(ylabel[datastream])
ax.set_xlabel(xlabel)
ax.legend(loc=legend_loc, fontsize=legend_fontsize, framealpha=1.0)
def _sleep_imu_plot_add_sleep_endpoints(sleep_endpoints: SleepEndpointDict, **kwargs):
bed_start = pd.to_datetime(sleep_endpoints["bed_interval_start"])
bed_end = | pd.to_datetime(sleep_endpoints["bed_interval_end"]) | pandas.to_datetime |
# divide by osm edge points and nodes
import pymongo
import json
import pandas as pd
import geopandas as gpd
import hashids
from hashids import Hashids
from shapely.geometry import LineString, Polygon, Point
import shapely
import osmnx
import geojson
import geopy.distance
myclient = pymongo.MongoClient("mongodb://192.168.120.127:27017")
mydb = myclient["tmap2"]
mylink = mydb["link1"]
myway = mydb["osm_edge"]
mylink_update = mydb["link2"]
districts = [
{'city': '杭州市', 'district': '上城区'},
{'city': '杭州市', 'district': '下城区'},
{'city': '杭州市', 'district': '拱墅区'},
{'city': '杭州市', 'district': '西湖区'},
{'city': '杭州市', 'district': '江干区'},
{'city': '杭州市', 'district': '滨江区'},
]
# save and read json as dict
def dump_json(path):
myway_dict = {}
for x in myway.find():
name = x["name"]
if str(name) != "nan":
if name not in myway_dict.keys():
myway_dict[name] = [x["u"], x["v"]]
else:
nodes_to_process = [x["u"], x["v"]]
for item in nodes_to_process:
if item not in myway_dict[name]:
myway_dict[name].append(item)
with open(path, 'w') as f:
json.dump(myway_dict, f)
def read_json(path):
with open(path, 'r') as f:
data = json.load(f)
return data
def gen_osm_id(*args):
hashid = hashids.encode(*args)
return hashid
hashids = Hashids(salt='this is my salt 1')
json_path = 'data/name.json'
dump_json(json_path)
way_dict = read_json(json_path)
# load node and point relation
def get_district_gdf(districts):
district_docs = []
for district in districts:
doc = mydb.district.find_one(district)
district_docs.append(doc)
district_gdf = | pd.DataFrame(district_docs) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 22 14:50:25 2021
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import LSTM, Bidirectional, GRU
from keras.layers.recurrent import LSTM
from sklearn.utils import shuffle
import seaborn as sns
import matplotlib.pyplot as plt
import math
data1 = pd.read_csv("B05_birlestirilmis.csv")
data2 = pd.read_csv("B07_birlestirilmis.csv")
data3 = pd.read_csv("B18_birlestirilmis.csv")
data4 = pd.read_csv("B33_birlestirilmis.csv")
data5 = pd.read_csv("B34_birlestirilmis.csv")
data6 = pd.read_csv("B46_birlestirilmis.csv")
data7 = pd.read_csv("B47_birlestirilmis.csv")
data8 = pd.read_csv("B48_birlestirilmis.csv")
X1=data1.iloc[:,0:31]
Y1=data1.iloc[:,30:31]
X2=data2.iloc[:,0:31]
Y2=data2.iloc[:,30:31]
X3=data3.iloc[:,0:31]
Y3=data3.iloc[:,30:31]
X4=data4.iloc[:,0:31]
Y4=data4.iloc[:,30:31]
X5=data5.iloc[:,0:31]
Y5=data5.iloc[:,30:31]
X6=data6.iloc[:,0:31]
Y6=data6.iloc[:,30:31]
X7=data7.iloc[:,0:31]
Y7=data7.iloc[:,30:31]
X8=data8.iloc[:,0:31]
Y8=data8.iloc[:,30:31]
# split the data into training and test sets
from sklearn.model_selection import train_test_split
trX1, teX1,trY1,teY1 = train_test_split(X1,Y1,test_size=0.20, random_state=0)
trX2, teX2,trY2,teY2 = train_test_split(X2,Y2,test_size=0.20, random_state=0)
trX3, teX3,trY3,teY3 = train_test_split(X3,Y3,test_size=0.20, random_state=0)
trX4, teX4,trY4,teY4 = train_test_split(X4,Y4,test_size=0.20, random_state=0)
trX5, teX5,trY5,teY5 = train_test_split(X5,Y5,test_size=0.20, random_state=0)
trX6, teX6,trY6,teY6 = train_test_split(X6,Y6,test_size=0.20, random_state=0)
trX7, teX7,trY7,teY7 = train_test_split(X7,Y7,test_size=0.20, random_state=0)
trX8, teX8,trY8,teY8 = train_test_split(X8,Y8,test_size=0.20, random_state=0)
tesX1=pd.DataFrame(teX1).sort_index()
tesY1=pd.DataFrame(teY1).sort_index()
tesX2=pd.DataFrame(teX2).sort_index()
tesY2=pd.DataFrame(teY2).sort_index()
tesX3=pd.DataFrame(teX3).sort_index()
tesY3=pd.DataFrame(teY3).sort_index()
tesX4=pd.DataFrame(teX4).sort_index()
tesY4=pd.DataFrame(teY4).sort_index()
tesX5=pd.DataFrame(teX5).sort_index()
tesY5=pd.DataFrame(teY5).sort_index()
tesX6=pd.DataFrame(teX6).sort_index()
tesY6=pd.DataFrame(teY6).sort_index()
tesX7=pd.DataFrame(teX7).sort_index()
tesY7=pd.DataFrame(teY7).sort_index()
tesX8=pd.DataFrame(teX8).sort_index()
tesY8=pd.DataFrame(teY8).sort_index()
trainX1=pd.DataFrame(trX1).sort_index()
trainY1=pd.DataFrame(trY1).sort_index()
trainX2=pd.DataFrame(trX2).sort_index()
trainY2=pd.DataFrame(trY2).sort_index()
trainX3=pd.DataFrame(trX3).sort_index()
trainY3=pd.DataFrame(trY3).sort_index()
trainX4=pd.DataFrame(trX4).sort_index()
trainY4=pd.DataFrame(trY4).sort_index()
trainX5=pd.DataFrame(trX5).sort_index()
trainY5=pd.DataFrame(trY5).sort_index()
trainX6=pd.DataFrame(trX6).sort_index()
trainY6=pd.DataFrame(trY6).sort_index()
trainX7=pd.DataFrame(trX7).sort_index()
trainY7=pd.DataFrame(trY7).sort_index()
trainX8=pd.DataFrame(trX8).sort_index()
trainY8=pd.DataFrame(trY8).sort_index()
trainXpart1=pd.concat((trainX1,trainX2),axis=0)
trainXpart2=pd.concat((trainX3,trainX4),axis=0)
trainXpart3=pd.concat((trainX5,trainX6),axis=0)
trainXpart4=pd.concat((trainX7,trainX8),axis=0)
trainXparta=pd.concat((trainXpart1,trainXpart2),axis=0)
trainXpartb=pd.concat((trainXpart3,trainXpart4),axis=0)
trainX=pd.concat((trainXparta,trainXpartb),axis=0)
trainX=trainX.values
trainYpart1=pd.concat((trainY1,trainY2),axis=0)
trainYpart2=pd.concat((trainY3,trainY4),axis=0)
trainYpart3=pd.concat((trainY5,trainY6),axis=0)
trainYpart4=pd.concat((trainY7,trainY8),axis=0)
trainYparta=pd.concat((trainYpart1,trainYpart2),axis=0)
trainYpartb=pd.concat((trainYpart3,trainYpart4),axis=0)
trainY= | pd.concat((trainYparta,trainYpartb),axis=0) | pandas.concat |
# imports
from sklearn.cluster import KMeans
import pandas as pd
import plotly.express as px
from numpy import random
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from flask import Flask, render_template, request, redirect, url_for, send_file
import base64
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import time
from flaskwebgui import FlaskUI
from sklearn.ensemble import IsolationForest
app = Flask(__name__)
#ui = FlaskUI(app)
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
#cache = Cache()
#cache = Cache(app, config={'CACHE_TYPE': 'simple'})
app.config['SESSION_COOKIE_SECURE'] = False
# In[19]:
global resp, df, urls, xy, display, k_means
@app.route('/')
def hello_world():
global resp
resp = {}
# When "submit" button is clicked,
# print order of names to console,
# then reorder python names list and render_template.
return render_template('index.html')
@app.route('/uploader', methods = ['GET', 'POST'])
def upload_file():
global resp, df, urls,xy, display
urls = []
if request.method == 'POST':
f = request.files['file']
resp['cols'] = list(pd.read_excel(f).columns)
df = | pd.read_excel(f) | pandas.read_excel |
from os.path import abspath, dirname, join
import h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from utils import make_dir, numpy_ewma_vectorized_v2, plot_postprocess, print_init, label_converter, series_indexer, \
color4label
class LogData(object):
def __init__(self):
self.mean = []
self.std = []
self.min = []
self.max = []
def log(self, sample):
"""
:param sample: data for logging specified as a numpy.array
:return:
"""
self.mean.append(sample.mean())
self.std.append(sample.std())
self.min.append(sample.min())
self.max.append(sample.max())
def save(self, group):
"""
:param group: the reference to the group level hierarchy of a .hdf5 file to save the data
:return:
"""
for key, val in self.__dict__.items():
group.create_dataset(key, data=val)
def load(self, group, decimate_step=100):
"""
:param decimate_step:
:param group: the reference to the group level hierarchy of a .hdf5 file to load
:return:
"""
# read in parameters
# [()] is needed to read in the whole array if you don't do that,
# it doesn't read the whole data but instead gives you lazy access to sub-parts
# (very useful when the array is huge but you only need a small part of it).
# https://stackoverflow.com/questions/10274476/how-to-export-hdf5-file-to-numpy-using-h5py
self.mean = group["mean"][()][::decimate_step]
self.std = group["std"][()][::decimate_step]
self.min = group["min"][()][::decimate_step]
self.max = group["max"][()][::decimate_step]
def plot_mean_min_max(self, label):
plt.fill_between(range(len(self.mean)), self.max, self.min, alpha=.5)
plt.plot(self.mean, label=label)
def plot_mean_std(self, label):
mean = np.array(self.mean)
plt.fill_between(range(len(self.mean)), mean + self.std, mean - self.std, alpha=.5)
plt.plot(self.mean, label=label)
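# Minimal usage sketch for LogData (illustrative only): log one numpy batch per
# update step, then plot the running mean with its min-max envelope.
#   rewards = LogData()
#   rewards.log(np.array([1.0, 2.0, 3.0]))
#   rewards.plot_mean_min_max("rewards")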
class TemporalLogger(object):
def __init__(self, env_name, timestamp, log_dir, *args):
"""
Creates a TemporalLogger object. If the folder structure is nonexistent, it will also be created
:param *args:
:param env_name: name of the environment
:param timestamp: timestamp as a string
:param log_dir: logging directory, if it is None, then logging will be at the same hierarchy level as src/
"""
super().__init__()
self.timestamp = timestamp
# file structure
self.base_dir = join(dirname(dirname(abspath(__file__))), "log") if log_dir is None else log_dir
self.data_dir = join(self.base_dir, env_name)
make_dir(self.base_dir)
make_dir(self.data_dir)
# data
for data in args:
self.__dict__[data] = LogData()
def log(self, **kwargs):
"""
Function for storing the new values of the given attribute
:param **kwargs:
:return:
"""
for key, value in kwargs.items():
self.__dict__[key].log(value)
def save(self, *args):
"""
Saves the temporal statistics into a .hdf5 file
        :param *args: names of the logged data series to store
:return:
"""
with h5py.File(join(self.data_dir, 'time_log_' + self.timestamp + '.hdf5'), 'w') as f:
for arg in args:
self.__dict__[arg].save(f.create_group(arg))
def load(self, filename, decimate_step=100):
"""
Loads the temporal statistics and fills the attributes of the class
:param decimate_step:
:param filename: name of the .hdf5 file to load
:return:
"""
if not filename.endswith('.hdf5'):
filename = filename + '.hdf5'
with h5py.File(join(self.data_dir, filename), 'r') as f:
for key, value in self.__dict__.items():
if isinstance(value, LogData):
value.load(f[key], decimate_step)
def plot_mean_min_max(self, *args):
fig, ax, _ = print_init(False)
for arg in args:
# breakpoint()
if arg in self.__dict__.keys(): # and isinstance(self.__dict__[arg], LogData):
self.__dict__[arg].plot_mean_min_max(arg)
plt.title("Mean and min-max statistics")
plot_postprocess(ax, f"Mean and min-max statistics of {args}",
ylabel=r"$\mu$")
def plot_mean_std(self, *args):
fig, ax, _ = print_init(False)
for arg in args:
if arg in self.__dict__.keys():
self.__dict__[arg].plot_mean_std(arg)
plt.title("Mean and standard deviation statistics")
plot_postprocess(ax, f"Mean and standard deviation statistics of {args}",
ylabel=r"$\mu$")
class EnvLogger(object):
def __init__(self, env_name, log_dir, decimate_step=250) -> None:
super().__init__()
self.env_name = env_name
self.log_dir = log_dir
self.decimate_step = decimate_step
self.data_dir = join(self.log_dir, self.env_name)
self.fig_dir = self.base_dir = join(dirname(dirname(abspath(__file__))), join("figures", self.env_name))
make_dir(self.fig_dir)
        self.params_df = pd.read_csv(join(self.data_dir, "params.tsv"), sep="\t")
self.logs = {}
mean_reward = []
mean_feat_std = []
mean_proxy = []
# load trainings
for timestamp in self.params_df.timestamp:
self.logs[timestamp] = TemporalLogger(self.env_name, timestamp, self.log_dir, *["rewards", "features"])
self.logs[timestamp].load(join(self.data_dir, f"time_log_{timestamp}"), self.decimate_step)
# calculate statistics
mean_reward.append(self.logs[timestamp].__dict__["rewards"].mean.mean())
mean_feat_std.append(self.logs[timestamp].__dict__["features"].std.mean())
mean_proxy.append(mean_reward[-1] * mean_feat_std[-1])
# append statistics to df
self.params_df["mean_reward"] = pd.Series(mean_reward, index=self.params_df.index)
self.params_df["mean_feat_std"] = | pd.Series(mean_feat_std, index=self.params_df.index) | pandas.Series |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas
from pandas.api.types import is_scalar
from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
_get_dtype_from_object,
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_dtype_equal,
is_object_dtype,
is_integer_dtype,
)
from pandas.core.index import _ensure_index_from_sequences
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.util._validators import validate_bool_kwarg
import itertools
import functools
import numpy as np
import re
import sys
import warnings
from modin.error_message import ErrorMessage
from .utils import from_pandas, to_pandas, _inherit_docstrings
from .iterator import PartitionIterator
from .series import SeriesView
@_inherit_docstrings(
pandas.DataFrame, excluded=[pandas.DataFrame, pandas.DataFrame.__init__]
)
class DataFrame(object):
def __init__(
self,
data=None,
index=None,
columns=None,
dtype=None,
copy=False,
query_compiler=None,
):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
data (numpy ndarray (structured or homogeneous) or dict):
Dict can contain Series, arrays, constants, or list-like
objects.
index (pandas.Index, list, ObjectID): The row index for this
DataFrame.
columns (pandas.Index): The column names for this DataFrame, in
pandas Index object.
dtype: Data type to force. Only a single dtype is allowed.
If None, infer
copy (boolean): Copy data from inputs.
Only affects DataFrame / 2d ndarray input.
query_compiler: A query compiler object to manage distributed computation.
"""
if isinstance(data, DataFrame):
self._query_compiler = data._query_compiler
return
# Check type of data and use appropriate constructor
if data is not None or query_compiler is None:
pandas_df = pandas.DataFrame(
data=data, index=index, columns=columns, dtype=dtype, copy=copy
)
self._query_compiler = from_pandas(pandas_df)._query_compiler
else:
self._query_compiler = query_compiler
def __str__(self):
return repr(self)
def _build_repr_df(self, num_rows, num_cols):
# Add one here so that pandas automatically adds the dots
# It turns out to be faster to extract 2 extra rows and columns than to
# build the dots ourselves.
num_rows_for_head = num_rows // 2 + 1
num_cols_for_front = num_cols // 2 + 1
if len(self.index) <= num_rows:
head = self._query_compiler
tail = None
else:
head = self._query_compiler.head(num_rows_for_head)
tail = self._query_compiler.tail(num_rows_for_head)
if len(self.columns) <= num_cols:
head_front = head.to_pandas()
# Creating these empty to make the concat logic simpler
head_back = pandas.DataFrame()
tail_back = pandas.DataFrame()
if tail is not None:
tail_front = tail.to_pandas()
else:
tail_front = pandas.DataFrame()
else:
head_front = head.front(num_cols_for_front).to_pandas()
head_back = head.back(num_cols_for_front).to_pandas()
if tail is not None:
tail_front = tail.front(num_cols_for_front).to_pandas()
tail_back = tail.back(num_cols_for_front).to_pandas()
else:
tail_front = tail_back = pandas.DataFrame()
head_for_repr = pandas.concat([head_front, head_back], axis=1)
tail_for_repr = pandas.concat([tail_front, tail_back], axis=1)
return pandas.concat([head_for_repr, tail_for_repr])
def __repr__(self):
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 30
result = repr(self._build_repr_df(num_rows, num_cols))
if len(self.index) > num_rows or len(self.columns) > num_cols:
# The split here is so that we don't repr pandas row lengths.
return result.rsplit("\n\n", 1)[0] + "\n\n[{0} rows x {1} columns]".format(
len(self.index), len(self.columns)
)
else:
return result
def _repr_html_(self):
"""repr function for rendering in Jupyter Notebooks like Pandas
Dataframes.
Returns:
The HTML representation of a Dataframe.
"""
# In the future, we can have this be configurable, just like Pandas.
num_rows = 60
num_cols = 20
# We use pandas _repr_html_ to get a string of the HTML representation
# of the dataframe.
result = self._build_repr_df(num_rows, num_cols)._repr_html_()
if len(self.index) > num_rows or len(self.columns) > num_cols:
# We split so that we insert our correct dataframe dimensions.
return result.split("<p>")[
0
] + "<p>{0} rows x {1} columns</p>\n</div>".format(
len(self.index), len(self.columns)
)
else:
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.index
def _get_columns(self):
"""Get the columns for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._query_compiler.columns
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.index = new_index
def _set_columns(self, new_columns):
"""Set the columns for this DataFrame.
Args:
new_index: The new index to set this
"""
self._query_compiler.columns = new_columns
index = property(_get_index, _set_index)
columns = property(_get_columns, _set_columns)
def _validate_eval_query(self, expr, **kwargs):
"""Helper function to check the arguments to eval() and query()
Args:
expr: The expression to evaluate. This string cannot contain any
Python statements, only Python expressions.
"""
        if isinstance(expr, str) and expr == "":
raise ValueError("expr cannot be an empty string")
if isinstance(expr, str) and "@" in expr:
ErrorMessage.not_implemented("Local variables not yet supported in eval.")
if isinstance(expr, str) and "not" in expr:
if "parser" in kwargs and kwargs["parser"] == "python":
ErrorMessage.not_implemented("'Not' nodes are not implemented.")
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# DataFrames have an invariant that requires they be 2 dimensions.
return 2
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
dtypes = self.dtypes.copy()
ftypes = ["{0}:dense".format(str(dtype)) for dtype in dtypes.values]
result = pandas.Series(ftypes, index=self.columns)
return result
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
return self._query_compiler.dtypes
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
return len(self.columns) == 0 or len(self.index) == 0
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return to_pandas(self).values
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return len(self.index), len(self.columns)
def _update_inplace(self, new_query_compiler):
"""Updates the current DataFrame inplace.
Args:
new_query_compiler: The new QueryCompiler to use to manage the data
"""
old_query_compiler = self._query_compiler
self._query_compiler = new_query_compiler
old_query_compiler.free()
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_prefix(prefix))
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
return DataFrame(query_compiler=self._query_compiler.add_suffix(suffix))
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
if not callable(func):
raise ValueError("'{0}' object is not callable".format(type(func)))
ErrorMessage.non_verified_udf()
return DataFrame(query_compiler=self._query_compiler.applymap(func))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(query_compiler=self._query_compiler.copy())
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
**kwargs
):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
sort: Whether or not to sort the result by the index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
axis = pandas.DataFrame()._get_axis_number(axis)
idx_name = ""
if callable(by):
by = by(self.index)
elif isinstance(by, string_types):
idx_name = by
by = self.__getitem__(by).values.tolist()
elif is_list_like(by):
if isinstance(by, pandas.Series):
by = by.values.tolist()
mismatch = (
len(by) != len(self) if axis == 0 else len(by) != len(self.columns)
)
if all(obj in self for obj in by) and mismatch:
# In the future, we will need to add logic to handle this, but for now
# we default to pandas in this case.
pass
elif mismatch:
raise KeyError(next(x for x in by if x not in self))
from .groupby import DataFrameGroupBy
return DataFrameGroupBy(
self,
by,
axis,
level,
as_index,
sort,
group_keys,
squeeze,
idx_name,
**kwargs
)
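# Illustrative sketch (not part of the original module): typical use of the groupby
# defined above, assuming this DataFrame class accepts pandas-style constructor data.
# The column names and values below are assumptions for the example only.
#
#   df = DataFrame({"team": ["a", "b", "a", "b"], "score": [1, 2, 3, 4]})
#   grouped = df.groupby("team")   # `by` is a column name, so idx_name is recorded
#   totals = grouped.sum()         # handled by DataFrameGroupBy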
def sum(
self,
axis=None,
skipna=True,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
self._validate_dtypes_sum_prod_mean(axis, numeric_only, ignore_axis=False)
return self._query_compiler.sum(
axis=axis,
skipna=skipna,
level=level,
numeric_only=numeric_only,
min_count=min_count,
**kwargs
)
def abs(self):
"""Apply an absolute value function to all numeric columns.
Returns:
A new DataFrame with the applied absolute value.
"""
self._validate_dtypes(numeric_only=True)
return DataFrame(query_compiler=self._query_compiler.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return DataFrame(query_compiler=self._query_compiler.isin(values=values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return DataFrame(query_compiler=self._query_compiler.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Returns:
A new DataFrame transposed from this DataFrame.
"""
return DataFrame(query_compiler=self._query_compiler.transpose(*args, **kwargs))
T = property(transpose)
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of non-NA values required to keep a label.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if is_list_like(axis):
axis = [pandas.DataFrame()._get_axis_number(ax) for ax in axis]
result = self
for ax in axis:
result = result.dropna(axis=ax, how=how, thresh=thresh, subset=subset)
return self._create_dataframe_from_compiler(result._query_compiler, inplace)
axis = pandas.DataFrame()._get_axis_number(axis)
if how is not None and how not in ["any", "all"]:
raise ValueError("invalid how option: %s" % how)
if how is None and thresh is None:
raise TypeError("must specify how or thresh")
if subset is not None:
if axis == 1:
indices = self.index.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
else:
indices = self.columns.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
new_query_compiler = self._query_compiler.dropna(
axis=axis, how=how, thresh=thresh, subset=subset
)
return self._create_dataframe_from_compiler(new_query_compiler, inplace)
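# Illustrative sketch (not part of the original module): the list-valued `axis` branch
# above simply applies dropna once per axis. The frame below is an assumption for the
# example only.
#
#   df = DataFrame({"a": [1, None], "b": [None, None]})
#   cleaned = df.dropna(axis=[0, 1], how="all")
#   # first drops the all-NA row, then the all-NA column, leaving {"a": [1.0]}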
def add(self, other, axis="columns", level=None, fill_value=None):
"""Add this DataFrame to another or a scalar/list.
Args:
other: What to add to this DataFrame.
axis: The axis to apply addition over. Only applicable to Series
or list 'other'.
level: A level in the multilevel axis to add over.
fill_value: The value to fill NaN.
Returns:
A new DataFrame with the applied addition.
"""
axis = | pandas.DataFrame() | pandas.DataFrame |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import datetime
from typing import Any, Dict
from pandas import DataFrame, concat
from lib.arcgis_data_source import ArcGISDataSource
from lib.case_line import convert_cases_to_time_series
from lib.cast import safe_int_cast
fromtimestamp = datetime.datetime.fromtimestamp
class FloridaDataSource(ArcGISDataSource):
def parse(self, sources: Dict[Any, str], aux: Dict[str, DataFrame], **parse_opts) -> DataFrame:
with open(sources[0], "r") as fd:
records = json.load(fd)["features"]
cases = DataFrame.from_records(records)
cases["date_new_confirmed"] = cases["ChartDate"].apply(
lambda x: fromtimestamp(x // 1000).date().isoformat()
)
# FL does not provide date for deceased or hospitalized, so we just copy it from confirmed
deceased_mask = cases.Died.str.lower() == "yes"
hospitalized_mask = cases.Hospitalized.str.lower() == "yes"
cases["date_new_deceased"] = None
cases["date_new_hospitalized"] = None
cases.loc[deceased_mask, "date_new_deceased"] = cases.loc[
deceased_mask, "date_new_confirmed"
]
cases.loc[hospitalized_mask, "date_new_hospitalized"] = cases.loc[
hospitalized_mask, "date_new_confirmed"
]
# Rename the sex labels
sex_adapter = lambda x: {"male": "male", "female": "female"}.get(x, "sex_unknown")
cases["sex"] = cases["Gender"].str.lower().apply(sex_adapter)
cases.drop(columns=["Gender"], inplace=True)
# Make sure age is an integer
cases["age"] = cases["Age"].apply(safe_int_cast)
cases.drop(columns=["Age"], inplace=True)
cases = cases.rename(columns={"County": "match_string"})
data = convert_cases_to_time_series(cases, ["match_string"])
data["country_code"] = "US"
data["subregion1_code"] = "FL"
# Aggregate to state level here, since some data locations are "Unknown"
group_cols = ["country_code", "subregion1_code", "date", "age", "sex"]
state = data.drop(columns=["match_string"]).groupby(group_cols).sum().reset_index()
# Remove bogus data
data = data[data.match_string != "Unknown"]
return | concat([state, data]) | pandas.concat |
import numpy as np
from csv import reader
from sklearn.model_selection import train_test_split
import pandas as pd
with open('glass_data.csv') as f:
raw_data = f.read()
####PREPROCESS OF THE DATASET######
def data_preprocess(raw_data):
# Load a CSV file
dataset = list()
#with filename as file:
csv_reader = reader(raw_data.split('\n'), delimiter=',')
for row in csv_reader:
if not row:
continue
dataset.append(row)
pd_data = pd.DataFrame(dataset)
labels = pd_data.iloc[:,-1].values
labels = labels[:, np.newaxis]
#CONVERTING TEXT CLASS LABELS TO NUMBERS
b, c = np.unique(labels, return_inverse=True)
labels = c[:, np.newaxis] + 1
labels = pd.DataFrame(labels)
pd_data.drop(pd_data.columns[len(pd_data.columns)-1], axis=1, inplace=True)
result = | pd.concat([pd_data, labels], axis=1) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import itertools
import json
import operator
import os
from pathlib import Path
from pprint import pprint
import re
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.lines import Line2D
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from tqdm.notebook import tqdm
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png')
# ## Load data and preprocess
# ### Metadata
# In[ ]:
# Map from test suite tag to high-level circuit.
circuits = {
"Licensing": ["npi", "reflexive"],
"Long-Distance Dependencies": ["fgd", "cleft"],
"Agreement": ["number"],
"Garden-Path Effects": ["npz", "mvrr"],
"Gross Syntactic State": ["subordination"],
"Center Embedding": ["center"],
}
tag_to_circuit = {tag: circuit
for circuit, tags in circuits.items()
for tag in tags}
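# Illustrative note (not part of the original notebook): `tag_to_circuit` inverts the
# mapping above, e.g. tag_to_circuit["npi"] == "Licensing" and
# tag_to_circuit["mvrr"] == "Garden-Path Effects".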
# In[ ]:
# Map codenames to readable names for various columns.
def format_pretrained(model_name):
return "%s$^*$" % model_name
PRETTY_COLUMN_MAPS = [
("model_name",
{
"vanilla": "LSTM",
"ordered-neurons": "ON-LSTM",
"rnng": "RNNG",
"ngram": "n-gram",
"random": "Random",
"gpt-2-pretrained": format_pretrained("GPT-2"),
"gpt-2-xl-pretrained": format_pretrained("GPT-2-XL"),
"gpt-2": "GPT-2",
"transformer-xl": format_pretrained("Transformer-XL"),
"grnn": format_pretrained("GRNN"),
"jrnn": format_pretrained("JRNN"),
}),
("corpus", lambda x: x.upper() if x else "N/A"),
]
PRETTY_COLUMNS = ["pretty_%s" % col for col, _ in PRETTY_COLUMN_MAPS]
# In[ ]:
# Exclusions
exclude_suite_re = re.compile(r"^fgd-embed[34]|^gardenpath|^nn-nv")
exclude_models = ["1gram", "ngram-no-rand"] # "ngram",
# In[ ]:
ngram_models = ["1gram", "ngram", "ngram-single"]
baseline_models = ["random"]
# Models for which we designed a controlled training regime
controlled_models = ["ngram", "ordered-neurons", "vanilla", "rnng", "gpt-2"]
controlled_nonbpe_models = ["ngram", "ordered-neurons", "vanilla", "rnng"]
# ### Load
# In[ ]:
ppl_data_path = Path("../data/raw/perplexity.csv")
test_suite_results_path = Path("../data/raw/sg_results")
# In[ ]:
perplexity_df = pd.read_csv(ppl_data_path, index_col=["model", "corpus", "seed"])
perplexity_df.index.set_names("model_name", level=0, inplace=True)
results_df = pd.concat([ | pd.read_csv(f) | pandas.read_csv |
from dataclasses import dataclass, field
from operator import itemgetter
from typing import Set, Dict, List, Optional, Tuple
from urllib.parse import urlparse
import copy
import orjson
import pandas as pd
from kgdata.wikidata.models import WDEntity
@dataclass
class Link:
href: str
start: int
end: int
is_selflink: bool = False
@property
def url(self):
if self.href.startswith("/"):
return "http://en.wikipedia.org" + self.href
if self.href.startswith("https://en.wikipedia.org"):
return self.href.replace("https://", "http://")
return self.href
def is_wikipedia_link(self):
return self.url.startswith("http://en.wikipedia.org")
def is_wikipedia_article_link(self):
return self.url.startswith("http://en.wikipedia.org/wiki/")
@dataclass
class ExternalLink:
__slots__ = ("dbpedia", "qnode", "qnode_id")
dbpedia: Optional[dict]
qnode: Optional[WDEntity]
qnode_id: Optional[str]
@staticmethod
def from_dict(o: dict):
if o["qnode"] is not None:
o["qnode"] = WDEntity.from_dict(o["qnode"])
return ExternalLink(**o)
@dataclass
class Cell:
value: str
html: str
links: List[Link] = field(default_factory=list)
attrs: Dict[str, str] = field(default_factory=dict)
is_header: bool = False
rowspan: int = 1
colspan: int = 1
original_rowspan: Optional[int] = None
original_colspan: Optional[int] = None
@property
def embedded_html_links(self):
text = self.value
embedded_links = []
for i, link in enumerate(self.links):
if i == 0:
embedded_links.append(text[: link.start])
else:
assert self.links[i - 1].start <= link.start, "The list is sorted"
embedded_links.append(text[self.links[i - 1].end : link.start])
embedded_links.append(
f'<a href="{link.url}" target="_blank" rel="noopener">'
+ text[link.start : link.end]
+ "</a>"
)
if i == len(self.links) - 1:
embedded_links.append(text[link.end :])
if len(self.links) == 0:
embedded_links.append(text)
return "".join(embedded_links)
@property
def this(self):
"""Return its self. Useful if we want to convert the table to list of cells"""
return self
@staticmethod
def from_dict(o: dict):
o["links"] = [Link(**l) for l in o["links"]]
return Cell(**o)
def clone(self):
"""Clone the current cell. This operator will be much cheaper than the deepcopy operator"""
return Cell(
value=self.value,
html=self.html,
links=copy.copy(self.links),
attrs=copy.copy(self.attrs),
is_header=self.is_header,
rowspan=self.rowspan,
colspan=self.colspan,
original_rowspan=self.original_rowspan,
original_colspan=self.original_colspan,
)
@dataclass
class Row:
cells: List[Cell]
attrs: Dict[str, str] = field(default_factory=dict)
@staticmethod
def from_dict(o: dict):
o["cells"] = [Cell.from_dict(c) for c in o["cells"]]
return Row(**o)
def __getitem__(self, index):
return self.cells[index]
class OverlapSpanException(Exception):
"""Indicating the table has cell rowspan and cell colspan overlaps"""
pass
class InvalidColumnSpanException(Exception):
"""Indicating that the column span is not used in a standard way. In particular, the total of columns' span is beyond the maximum number of columns is considered
to be non standard with one exception that only the last column spans more than the maximum number of columns
"""
pass
@dataclass
class Table:
id: str
pageURI: str
rows: List[Row]
caption: Optional[str] = None
attrs: Dict[str, str] = field(default_factory=dict)
classes: Set[str] = field(default_factory=set)
is_spanned: bool = False
external_links: Optional[Dict[str, ExternalLink]] = None
@staticmethod
def from_dict(o: dict):
o["classes"] = set(o["classes"])
o["rows"] = [Row.from_dict(r) for r in o["rows"]]
if o["external_links"] is not None:
for k, v in o["external_links"].items():
o["external_links"][k] = (
ExternalLink.from_dict(v) if v is not None else None
)
return Table(**o)
@staticmethod
def deser_str(text):
return Table.from_dict(orjson.loads(text))
@property
def wikipediaURL(self):
"""Return a wikipedia URL from dbpedia URI"""
return urlparse(self.pageURI).path.replace(
"/resource/", "https://en.wikipedia.org/wiki/"
)
def ser_bytes(self):
return orjson.dumps(self, option=orjson.OPT_SERIALIZE_DATACLASS, default=list)
def pad(self) -> "Table":
"""Pad the irregular table (missing cells) to make it become regular table.
This function only return new table when it's padded
"""
if not self.is_spanned:
raise Exception("You should only pad the table after it's spanned")
if self.is_regular_table():
return self
max_ncols = max(len(r.cells) for r in self.rows)
default_cell = Cell(value="", html="", original_rowspan=1, original_colspan=1)
rows = []
for r in self.rows:
row = Row(cells=[c.clone() for c in r.cells], attrs=copy.copy(r.attrs))
while len(row.cells) < max_ncols:
row.cells.append(default_cell.clone())
rows.append(row)
return Table(
id=self.id,
pageURI=self.pageURI,
classes=copy.copy(self.classes),
rows=rows,
caption=self.caption,
attrs=copy.copy(self.attrs),
is_spanned=True,
external_links=copy.copy(self.external_links),
)
def span(self) -> "Table":
"""Span the table by copying values to merged field"""
pi = 0
data = []
pending_ops = {}
# >>> begin find the max #cols
# calculate the number of columns, since some people set unrealistic colspan values out of laziness.
# I try to make the behaviour as close to the browser's as possible.
# one thing I notice is that, to find the correct value of colspan, browsers take into account the #cells of rows below the current row
# so we may have to iterate several times
cols = [0 for _ in range(len(self.rows))]
for i, row in enumerate(self.rows):
cols[i] += len(row.cells)
for cell in row.cells:
if cell.rowspan > 1:
for j in range(1, cell.rowspan):
if i + j < len(cols):
cols[i + j] += 1
_row_index, max_ncols = max(enumerate(cols), key=itemgetter(1))
# sometimes browsers do show an extra cell for an over-colspan row, but it's not consistent, or at least not easy for me to find the rule,
# so I decided not to handle that. Hopefully we don't have many tables like that.
# >>> finish find the max #cols
for row in self.rows:
new_row = []
pj = 0
for cell_index, cell in enumerate(row.cells):
cell = cell.clone()
cell.original_colspan = cell.colspan
cell.original_rowspan = cell.rowspan
cell.colspan = 1
cell.rowspan = 1
# adding cell from the top
while (pi, pj) in pending_ops:
new_row.append(pending_ops[pi, pj].clone())
pending_ops.pop((pi, pj))
pj += 1
# now add cell and expand the column
for _ in range(cell.original_colspan):
if (pi, pj) in pending_ops:
# exception, overlapping between colspan and rowspan
raise OverlapSpanException()
new_row.append(cell.clone())
for ioffset in range(1, cell.original_rowspan):
# no need for this if below
# if (pi+ioffset, pj) in pending_ops:
# raise OverlapSpanException()
pending_ops[pi + ioffset, pj] = cell
pj += 1
if pj >= max_ncols:
# our algorithm cannot handle the case where people abuse the colspan system; it can only handle the case
# where the span that goes beyond the maximum number of columns is in the last column.
if cell_index != len(row.cells) - 1:
raise InvalidColumnSpanException()
else:
break
# add more cells from the top since we reach the end
while (pi, pj) in pending_ops and pj < max_ncols:
new_row.append(pending_ops[pi, pj].clone())
pending_ops.pop((pi, pj))
pj += 1
data.append(Row(cells=new_row, attrs=copy.copy(row.attrs)))
pi += 1
# len(pending_ops) may > 0, but fortunately, it doesn't matter as the browser also does not render that extra empty lines
return Table(
id=self.id,
pageURI=self.pageURI,
classes=copy.copy(self.classes),
rows=data,
caption=self.caption,
attrs=copy.copy(self.attrs),
is_spanned=True,
external_links=copy.copy(self.external_links),
)
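# Illustrative sketch (not part of the original module): how span() expands merged cells.
# The tiny table below is an assumption for the example only.
#
#   <table><tr><td rowspan="2">A</td><td>B</td></tr><tr><td>C</td></tr></table>
#
# After span(), the "A" cell is copied into the second row, so to_list()/to_df() see a
# regular 2x2 grid:
#
#   [["A", "B"],
#    ["A", "C"]]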
def is_regular_table(self) -> bool:
ncols = len(self.rows[0].cells)
return all(len(self.rows[i].cells) == ncols for i in range(1, len(self.rows)))
def get_shape(self) -> Tuple[int, int]:
"""Get shape of the table.
Returns
-------
Tuple[int, int]
A 2-tuples of rows' number and cols' number
Raises
------
Exception
When this is not a regular table (not 2D)
"""
if not self.is_regular_table():
raise Exception("Cannot get shape of an irregular table")
return len(self.rows), len(self.rows[0].cells)
def to_list(self, value_field: str = "value") -> list:
"""Convert the table into a list
Parameters
----------
value_field: str, optional
cell's field that we want to use as a value, default is `value`. Change to html or inner html to see the html
Returns
-------
list
"""
if not self.is_spanned:
return self.span().to_list(value_field)
return [[getattr(c, value_field) for c in row.cells] for row in self.rows]
def to_df(
self, use_header_when_possible: bool = True, value_field: str = "value"
) -> pd.DataFrame:
"""Convert table to dataframe. Auto-span the row by default. This will throw error
if the current table is not a regular table.
Parameters
----------
use_header_when_possible : bool, optional
if the first row is all header cells, we use it as the header. Note that other header cells
which aren't on the first row are going to be treated as data cells
value_field: str, optional
cell's field that we want to use as a value, default is `value`. Change to html or inner html to see the html
Returns
-------
pd.DataFrame
Data frame
"""
if not self.is_spanned:
return self.span().to_df(use_header_when_possible, value_field)
data = [[getattr(c, value_field) for c in r.cells] for r in self.rows]
if use_header_when_possible:
if all(c.is_header for c in self.rows[0].cells):
return pd.DataFrame(data[1:], columns=data[0])
return | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import matplotlib as mpl
import numpy as np
from sklearn import metrics
import itertools
import warnings
from dateutil.relativedelta import relativedelta
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import ticker
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
# plt.style.use('ggplot')
sns.set_theme(style="darkgrid")
font = {'size' : 12}
mpl.rc('font', **font)
mpl.rc('figure', max_open_warning = 0)
| pd.set_option('display.max_columns',None) | pandas.set_option |
"""
econ_platform_core - Glue code for a unified work environment.
The center-piece of the package is *fetch()* a function which dynamically loads a pandas time series (Series object)
from any supported provider or database interface. The fetch command will determine whether the series exists in the
local database, or whether to query the external provider. If it already exists, the platform will decide whether it
is time to seek a refresh. (NOTE: That update protocol is not implemented at the time of writing.)
For a user, this is all done "under the hood" within a single line of code. The same plotting routines will always
work, even if the computer is cut off from external providers and the user has to grab locally archived data.
Normally, users will import econ_platform and call init_econ_platform(), which will
initialise this package.
Importing this file alone is supposed to have minimal side effects. However, configuration information is not
loaded, and so most functions will crash. This means that end uses will almost always want to call the initialisation
function (or import econ_platform.start, which is a script that initialises the platform).
This package is supposed to only depend on standard Python libraries and *pandas*. Anything else (including
things like matplotlib) are pushed into econ_platform, where code is meant to be loaded as extensions. If an extension
cannot be loaded (missing API packages, for example), econ_platform will still load up, it will just report that
an extension load failed.
Since sqlite3 is in the standard Python libraries, base SQL functionality will be implemented here.
(The design question is whether the code in here should migrate to other files. The issue is avoiding circular imports.
It might be possible, but I might need to redesign some classes, and create more "do nothing" base classes that just
offer an interface to users. Not a huge priority, and I should make sure I have more comprehensive test coverage
before trying.)
Copyright 2019 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pandas
import traceback
import webbrowser
import warnings
import datetime
# As a convenience, use the "logging.info" function as log.
from logging import info as log, debug as log_debug, warning as log_warning, error as log_error
import econ_platform_core.configuration
from econ_platform_core.entity_and_errors import PlatformEntity, PlatformError, TickerNotFoundError
from econ_platform_core import utils as utils
import econ_platform_core.extensions
from econ_platform_core.tickers import TickerFull, TickerDataType, TickerFetch, TickerLocal, TickerProviderCode
from econ_platform_core.series_metadata import SeriesMetadata
import econ_platform_core.tickers
# Get the logging information. Users can either programmatically change the LogInfo.LogDirectory or
# use a config file before calling start_log()
from econ_platform_core.update_protocols import UpdateProtocolManager, NoUpdateProtocol
LogInfo = utils.PlatformLogger()
def start_log(fname=None): # pragma: nocover
"""
Call this function if you want a log. By default, the log name is based on the base Python script name
(sys.argv[0]), and goes into the default directory (LogInfo.LogDirectory).
:param fname: str
:return:
"""
global LogInfo
LogInfo.StartLog(fname)
PlatformConfiguration = econ_platform_core.configuration.ConfigParserWrapper()
ExtensionList = econ_platform_core.extensions.ExtensionManager()
class DatabaseManager(PlatformEntity):
"""
This is the base class for Database Managers.
Note: Only support full series replacement for now.
SetsLastUpdateAutomatically: Does the database update the last_update/last_refresh fields automatically on
a write? If False, the UpdateProtocol is supposed to call SetLastUpdate() after a Write().
"""
def __init__(self, name='Virtual Object'):
super().__init__()
self.Name = name
# This is overridden by the AdvancedDatabase constructor.
# By extension, everything derived from this base class (like the TEXT database is "not advanced."
self.IsAdvanced = False
self.Code = ''
self.ReplaceOnly = True
self.SetsLastUpdateAutomatically = False
def Find(self, ticker):
"""
Can we find the ticker on the database? Default behaviour is generally adequate.
:param ticker: str
:return: SeriesMetadata
"""
warnings.warn('Find is being replaced by GetMeta()', DeprecationWarning)
return self.GetMeta(ticker)
# ticker_obj = econ_platform_core.tickers.map_string_to_ticker(ticker)
# if type(ticker_obj) is TickerLocal:
# return self._FindLocal(ticker_obj)
# if type(ticker_obj) is TickerDataType:
# return self._FindDataType(ticker_obj)
# meta = SeriesMetadata()
# meta.ticker_local = None
# meta.ticker_full = ticker_obj
# meta.series_provider_code, meta.ticker_query = ticker_obj.SplitTicker()
# meta.Exists = self.Exists(meta)
# # Provider-specific meta data data not supported yet.
# return meta
def _FindLocal(self, local_ticker): # pragma: nocover
"""
Databases that support local tickers should override this method.
:param local_ticker: TickerLocal
:return:
"""
raise NotImplementedError('This database does not support local tickers')
def _FindDataType(self, datatype_ticker): # pragma: nocover
"""
:param datatype_ticker: TickerDataType
:return:
"""
raise NotImplementedError('This database does not support data type tickers')
def Exists(self, ticker): # pragma: nocover
"""
This method is only really needed by the non-complex databases.
:param ticker: str
:return: bool
"""
raise NotImplementedError()
def Retrieve(self, series_meta): # pragma: nocover
"""
:param series_meta: SeriesMetadata
:return: pandas.Series
"""
raise NotImplementedError()
def GetMeta(self, ticker_str):
ticker_obj = econ_platform_core.tickers.map_string_to_ticker(ticker_str)
if type(ticker_obj) is TickerLocal:
return self._FindLocal(ticker_obj)
if type(ticker_obj) is TickerDataType:
return self._FindDataType(ticker_obj)
meta = SeriesMetadata()
meta.ticker_local = None
meta.ticker_full = ticker_obj
meta.series_provider_code, meta.ticker_query = ticker_obj.SplitTicker()
meta.Exists = self.Exists(meta)
# Provider-specific meta data data not supported yet.
return meta
def GetLastRefresh(self, ticker_full):
"""
Get the last refresh datetime for a ticker.
Subclasses must implement this method, as it is the minimal information needed for an update
strategy.
:param ticker_full: TickerFull
:return:
"""
raise NotImplementedError()
def SetLastRefresh(self, ticker_full, time_stamp=None):
"""
Set the timestamp of the last refresh. Note that this will be called if an external provider is polled
and there is no new data.
If time_stamp is None, the manager should set to the current time.
This needs to be implemented by all database managers. The simplest manager (TEXT) will just touch the file
to reset the time stamp.
If the database manager is told to write the series, it is up to the database manager to set the LastRefresh
during the write operation. It is left to the manager, as it is likely that it will be more efficient to set the
status during the write operation.
:param ticker_full: TickerFull
:param time_stamp: datatime.datetime
:return:
"""
raise NotImplementedError()
def SetLastUpdate(self, ticker_full, time_stamp=None):
"""
Sets the last_update *and* last_refresh fields. If time_stamp is None, uses current time.
Called after a Write() by the UpdateProtocol, unless self.SetsLastUpdateAutomatically is True.
:param ticker_full: TickerFull
:param time_stamp: datetime.date
:return:
"""
raise NotImplementedError()
def RetrieveWithMeta(self, full_ticker):
"""
Retrieve both the meta data and the series. Have a single method in case there is
an optimisation for the database to do both queries at once.
Since we normally do not want the meta data at the same time, have the usual workflow to just
use the Retrieve() interface.
:param full_ticker: str
:return: list
"""
meta = self.GetMeta(full_ticker)
meta.AssertValid()
ser = self.Retrieve(meta)
return ser, meta
def Delete(self, series_meta): # pragma: nocover
"""
Delete a series.
:param series_meta: SeriesMetadata
:return:
"""
raise NotImplementedError()
def Write(self, ser, series_meta, overwrite=True): # pragma: nocover
"""
:param ser: pandas.Series
:param series_meta: SeriesMetadata
:param overwrite: bool
:return:
"""
raise NotImplementedError()
class DatabaseList(PlatformEntity):
"""
List of all Database managers. Developers can push their own DatabaseManagers into the global object.
"""
def __init__(self):
super().__init__()
self.DatabaseDict = {}
def Initialise(self):
pass
def AddDatabase(self, wrapper, code=None):
"""
Add a database. If the code is not supplied, uses the code based on the PlatformConfiguration
setting (normal use). Only need to supply the code for special cases, like having extra SQLite
database files for testing.
:param wrapper: DatabaseManager
:param code: str
:return:
"""
if code is None:
code = PlatformConfiguration['DatabaseList'][wrapper.Name]
wrapper.Code = code
self.DatabaseDict[wrapper.Code] = wrapper
def __getitem__(self, item):
"""
Access method.
:param item: str
:return: DatabaseManager
"""
if item.upper() == 'DEFAULT':
item = PlatformConfiguration['Database']['Default']
if item == 'SQL':
# If the DEFAULT is SQL, will be re-mapped twice!
item = PlatformConfiguration['Database']['SQL']
return self.DatabaseDict[item]
def TransferSeries(self, full_ticker, source, dest):
"""
Transfer a series from one database to another.
Useful for migrations and testing.
:param full_ticker: str
:param source: str
:param dest: str
:return:
"""
source_manager = self[source]
dest_manager = self[dest]
ser, meta = source_manager.RetrieveWithMeta(full_ticker)
# The meta information should be the same, except the Exists flag...
meta.Exists = dest_manager.Exists(full_ticker)
dest_manager.Write(ser, meta)
Databases = DatabaseList()
class ProviderWrapper(PlatformEntity):
"""
Data provider class. Note that we call them "providers" and not "sources" since the source is the
agency in the real world that calculates the data. The provider and the source can be the same - for example,
if we get Eurostat data from Eurostat itself. However, we can get Eurostat data from DB.nomics.
"""
Name: str
def __init__(self, name='VirtualObject', default_code=None):
super().__init__()
self.Name = name
self.ProviderCode = ''
self.IsExternal = True
# Are these data only pushed to the database? If so, never attempt to update from within a fetch.
self.PushOnly = False
# Sometimes we fetch an entire table as a side effect of fetching a series.
# A provider can mark this possibility by setting TableWasFetched to True, and
# loading up TableSeries, TableMeta with series/meta-data. The fetch() function will
# store the data.
self.TableWasFetched = False
self.TableSeries = {}
self.TableMeta = {}
self.WebPage = ''
if not name == 'VirtualObject':
try:
self.ProviderCode = PlatformConfiguration['ProviderList'][name]
except KeyError:
if default_code is None:
raise PlatformError(
'Must set the provider code in the config file under [ProviderList] for provider {0}'.format(name))
else:
self.ProviderCode = default_code
def fetch(self, series_meta): # pragma: nocover
"""
Fetch a series from a provider.
:param series_meta: SeriesMetadata
:return: pandas.Series
"""
raise NotImplementedError
def GetSeriesURL(self, series_meta):
"""
Get the URL for a series, if possible. Otherwise, returns the provider webpage.
:param series_meta: SeriesMetadata
:return: str
"""
try:
return self._GetSeriesUrlImplementation(series_meta)
except NotImplementedError:
return self.WebPage
def _GetSeriesUrlImplementation(self, series_meta):
"""
Implements the actual series-URL lookup. If a NotImplementedError is thrown, the object will return the
Provider.WebPage.
"""
raise NotImplementedError()
class ProviderList(PlatformEntity):
"""
List of all provider wrappers. Developers can push their own DatabaseManagers into the global object.
Keep the "User" provider also saved as a data member, since it will be accessed by code hooking into it.
"""
def __init__(self):
super().__init__()
self.ProviderDict = {}
self.EchoAccess = False
self.UserProvider = None
self.PushOnlyProvider = None
def Initialise(self):
self.EchoAccess = PlatformConfiguration['ProviderOptions'].getboolean('echo_access')
def AddProvider(self, obj):
"""
Add a provider
:param obj: ProviderWrapper
:return:
"""
self.ProviderDict[obj.ProviderCode] = obj
if obj.Name == 'User':
# Tuck the "User" provider into a known location.
self.UserProvider = obj
if obj.Name == 'PushOnly':
self.PushOnlyProvider = obj
def __getitem__(self, item):
"""
Access method
:param item: str
:return: ProviderWrapper
"""
# Need to convert to string since we are likely passed a ticker.
return self.ProviderDict[str(item)]
Providers = ProviderList()
UpdateProtocolList = UpdateProtocolManager()
def fetch(ticker, database='Default', dropna=True):
"""
Fetch a series from database; may create series and/or update as needed.
(May create a "fetchmany()" for fancier fetches that take a slice of the database.)
:param ticker: str
:param database: str
:param dropna: bool
:return: pandas.Series
"""
# Default handling is inside the database manager...
# if database.lower() == 'default':
# database = PlatformConfiguration["Database"]["Default"]
database_manager: DatabaseManager = Databases[database]
series_meta = database_manager.GetMeta(ticker)
series_meta.AssertValid()
provider_code = series_meta.series_provider_code
# noinspection PyPep8
try:
provider_manager: ProviderWrapper = Providers[provider_code]
except:
raise KeyError('Unknown provider_code: ' + str(provider_code)) from None
if series_meta.Exists:
# Return what is on the database.
global UpdateProtocolList
# TODO: Allow for choice of protocol.
return UpdateProtocolList["DEFAULT"].Update(ticker, series_meta, provider_manager, database_manager)
else:
return UpdateProtocolList["DEFAULT"].FetchAndWrite(ticker, series_meta, provider_manager, database_manager)
# if provider_manager.IsExternal:
# _hook_fetch_external(provider_manager, ticker)
# if provider_manager.PushOnly:
# raise PlatformError(
# 'Series {0} does not exist on {1}. Its ticker indicates that it is push-only series.'.format(
# ticker, database)) from None
# log_debug('Fetching %s', ticker)
# # Force this to False, so that ProviderManager extension writers do not need to
# # remember to do so.
# provider_manager.TableWasFetched = False
# if Providers.EchoAccess:
# print('Going to {0} to fetch {1}'.format(provider_manager.Name, ticker))
# try:
# out = provider_manager.fetch(series_meta)
# except TickerNotFoundError:
# # If the table was fetched, write the table, even if the specific series was not there...
# if provider_manager.TableWasFetched:
# for k in provider_manager.TableSeries:
# t_ser = provider_manager.TableSeries[k]
# t_meta = provider_manager.TableMeta[k]
# # Don't write the single series again..
# if str(t_meta.ticker_full) == str(series_meta.ticker_full):
# continue
# database_manager.Write(t_ser, t_meta)
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(t_meta.ticker_full)
# raise
# if type(out) is not tuple:
# ser = out
# else:
# ser, series_meta = out
# if dropna:
# ser = ser.dropna()
# log('Writing %s', ticker)
# database_manager.Write(ser, series_meta)
# # Having this logic repeated three times is silly, but I want to force subclasses to
# # implement SetLastUpdate(), as otherwise update protocols will break.
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(series_meta.ticker_full)
# if provider_manager.TableWasFetched:
# for k in provider_manager.TableSeries:
# t_ser = provider_manager.TableSeries[k]
# t_meta = provider_manager.TableMeta[k]
# # Don't write the single series again..
# if str(t_meta.ticker_full) == str(series_meta.ticker_full):
# continue
# database_manager.Write(t_ser, t_meta)
# if not database_manager.SetsLastUpdateAutomatically:
# database_manager.SetLastUpdate(t_meta.ticker_full)
#
# return ser
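# Illustrative sketch (not part of the original module): typical use of fetch() once the
# platform has been initialised (e.g. by importing econ_platform.start). The ticker string
# below is purely hypothetical; real tickers combine a provider code and a provider query,
# and the exact format depends on the platform's ticker conventions.
#
#   import econ_platform_core
#   ser = econ_platform_core.fetch("SOME_FULL_TICKER")   # hypothetical ticker
#   print(ser.tail())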
def fetch_df(ticker, database='Default', dropna=True):
"""
Return a DataFrame. Used by R.
As a convenience for R users, dumps the last error to the log file.
:param ticker: str
:param database: str
:param dropna: bool
:return: pandas.DataFrame
"""
# noinspection PyPep8
try:
ser = fetch(ticker, database, dropna)
df = | pandas.DataFrame({'series_dates': ser.index, 'series_values': ser.values}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 23:51:08 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
from functools import partial
from american_option_pricing import american_option
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('data_v3.xlsx', index_col=None)
current_date = date(2020,7,24)
expiry_date = date(2020,8,7)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
max_quantity_per_leg = 5
min_e_pnl = 0
min_p_profit = 30
max_cost = 750
max_loss = 750
mode = "rule_based" #"all_combinations"/"rule_based" - Always keep rule based
save_results = False
Strategies = ["Bear Call Spread","Bull Call Spread", \
"Bull Put Spread", "Bear Put Spread",\
"Bull Put Ladder", "Bear Call Ladder",\
"Long Straddle", "Long Strangle", \
"Long Strap", "Long Strip",\
# "Short Straddle", "Short Strangle", \
"Long Call Butterfly", "Long Put Butterfly",\
"Short Call Butterfly", "Short Put Butterfly",\
"Long Iron Butterfly", "Short Iron Butterfly",\
"Long Call Condor", "Long Put Condor", \
"Short Call Condor", "Short Put Condor", \
"Long Iron Condor", "Short Iron Condor", \
"Long Box"\
]
Strategies = []
"""
#######################################################################################
Get Risk Free Date
#######################################################################################
"""
#rf_eod_data = yf.download("^IRX", start="1993-01-01", end="2019-11-15")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = pd.to_numeric(rf_eod_data[col],errors='coerce')
rf_eod_data=rf_eod_data.fillna(method='ffill')
rf_eod_data['interest']=((1+(rf_eod_data['Adj Close']/100))**(1/252))-1
rf_eod_data['annualized_interest']=252*(((1+(rf_eod_data['Adj Close']/100))**(1/252))-1)
rf_value =rf_eod_data['annualized_interest'].iloc[-1]
print("Current Risk Free Rate is :",'{:.3f}%'.format(rf_value*100))
"""
#######################################################################################
Data Cleaning
#######################################################################################
"""
def wrang_1(df, col_names):
for col in col_names:
df[col] = df[col].str.rstrip('%')
df[col] = pd.to_numeric(df[col],errors='coerce')
df[col] = [float(x)/100.0 for x in df[col].values]
return df
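# Illustrative note (not part of the original script): wrang_1 turns percentage strings
# into decimal fractions, e.g. an "Impl Vol" cell of "45.30%" becomes 0.453.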
convert_cols = ["Impl Vol", "Prob.ITM","Prob.OTM","Prob.Touch"]
data = wrang_1(data,convert_cols)
def label_type(row):
if row['Symbol'][0] == "." :
return 'Option'
return 'Stock'
data['Type']=data.apply(lambda row: label_type(row), axis=1)
data['Expiry_Date']= data.Symbol.str.extract('(\d+)')
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: pd.to_datetime(str(x), format='%y%m%d'))
expiry_date_str = expiry_date.strftime("%Y%m%d")
data['Expiry_Date'] = data['Expiry_Date'].fillna(pd.Timestamp(expiry_date_str))
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: x.strftime('%Y_%m_%d'))
#TODO: Change the logic for new symbol. Works only for this year.
data['Group']= data.Symbol.apply(lambda st: st[st.find(".")+1:st.find("20")])
data['Group'] = np.where(data['Type'] == "Stock", data['Symbol'],data['Group'])
data['Chain_ID'] = data['Group']+"_"+data['Expiry_Date']
data['Spread'] = data['Bid']-data['Ask']
stock_data = data[data['Type'] == "Stock"]
stock_data.rename(columns={"Description": "stock_Description",
"Last": "stock_Last",
"High":"stock_High",
"Low":"stock_Low",
"Open" : "stock_Open",
"Volume":"stock_Volume",
"Bid":"stock_Bid",
"Ask":"stock_Ask",
"Impl Vol":"stock_Impl_Vol",
"Spread":"stock_Spread"}, inplace=True)
stock_data = stock_data[["stock_Description","stock_Last","stock_High","stock_Low",
"stock_Open","stock_Volume","stock_Bid","stock_Ask",
"stock_Impl_Vol","stock_Spread","Chain_ID"]]
option_data = data[data['Type']=="Option"]
option_data['Option_type'] = option_data.loc[:,'Description'].str.split(' ').str[-1]
final_dataset = pd.merge(option_data,stock_data,on=['Chain_ID'])
"""
#######################################################################################
Option Chain Class
#######################################################################################
"""
class Option_chain(object):
def __init__(self,asset,final_dataset):
self.Name = asset
asset_df = final_dataset[final_dataset['Group']==asset]
asset_df["Dummy_Strike"] = asset_df.loc[:,"Strike"]
filtered = asset_df.groupby('Strike')['Strike'].filter(lambda x: len(x) == 2)
not_common = asset_df[~asset_df['Strike'].isin(filtered)]
records = not_common.to_dict('records')
to_add = list()
for record in records:
new = record.copy()
if record["Option_type"] == "CALL":
for keys,value in new.items():
if type(value) ==str :
if keys in ["Symbol","Description"]:
new[keys] = "Dummy"
if keys == "Option_type":
new[keys] = "PUT"
else:
if keys[:5] != "stock" and (keys not in ["Strike", "Dummy_Strike"]):
new[keys] = 0
new["Strike"] = -1000000
else:
for keys,value in new.items():
if type(value) ==str :
if keys in ["Symbol","Description"]:
new[keys] = "Dummy"
if keys == "Option_type":
new[keys] = "CALL"
else:
if keys[:5] != "stock" and (keys not in ["Strike", "Dummy_Strike"]):
new[keys] = 0
new["Strike"] = 1000000
to_add.append(new)
df_dummy_options = pd.DataFrame(to_add)
asset_df = asset_df.append(df_dummy_options)
asset_df = asset_df.sort_values(by=['Option_type', 'Dummy_Strike'])
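# Note added for clarity (reading of the code above, not original commentary): strikes quoted
# on only one side of the chain get a "Dummy" record on the other side, with zeroed prices and
# greeks and a Strike of +/-1,000,000, so the call and put arrays stay aligned strike-by-strike
# while the far-away dummy strike keeps the fake leg's payoff at zero.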
greeks = ["Delta","Gamma","Theta","Vega","Rho"]
for greek in greeks:
setattr(self,"Var_"+greek, np.std(asset_df[greek].values))
self.lot_size = 100
self.call_df = asset_df[asset_df['Option_type']=="CALL"]
self.put_df = asset_df[asset_df['Option_type']=="PUT"]
cols = ["Bid","Ask", "Last", "Delta","Gamma","Theta","Vega","Rho","Impl Vol","Strike", "Dummy_Strike"]
for col in cols:
setattr(self,"Call_"+col,self.call_df[col].values)
setattr(self,"Put_"+col,self.put_df[col].values)
setattr(self,"Max_strike", np.max(self.Call_Strike))
setattr(self,"Min_strike", np.min(self.Call_Strike))
setattr(self,"Call_total", len(self.Call_Strike))
setattr(self,"Put_total", len(self.Put_Strike))
setattr(self,"Min_strike_width", np.min(np.diff(self.Call_Strike)))
setattr(self,"Max_strike_width", np.max(np.diff(self.Call_Strike)))
setattr(self,"Stock_Last", asset_df["stock_Last"].iloc[0])
setattr(self,"Description", asset_df["stock_Description"].iloc[0])
setattr(self,"Stock_Volt", asset_df["stock_Impl_Vol"].iloc[0])
setattr(self,"Time_to_exp", days_to_expiry)
std = self.Stock_Last*(self.Stock_Volt*((self.Time_to_exp/252)**0.5))
self.sigma = std
R_year = rf_value # Risk Free Interest Rate
self.Rf = rf_value
vol_day = (self.Stock_Volt)*math.sqrt(1/252)
ln_S_0 = math.log(self.Stock_Last)
rt = R_year*(self.Time_to_exp/252)
s_term= -0.5*(vol_day**2)*(self.Time_to_exp)
mu_n = ln_S_0+rt+s_term
sigma_n = vol_day*math.sqrt(self.Time_to_exp)
s = sigma_n
scale = math.exp(mu_n)
self.lognormal_s = s
self.lognormal_scale =scale
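# Note added for clarity (not original commentary): the block above parameterises the usual
# lognormal terminal-price model,
#   ln(S_T) ~ N( ln(S_0) + R*(T/252) - 0.5*vol_day**2*T ,  vol_day**2 * T ),
# with T the number of trading days to expiry and vol_day the daily volatility, and maps it
# onto scipy.stats.lognorm via s = sigma_n and scale = exp(mu_n).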
self.S_space = np.linspace(self.Stock_Last - 4*self.sigma, self.Stock_Last + 4*self.sigma, 20)
self.S_density = stats.lognorm.pdf(self.S_space,s,loc=0,scale=scale)
## for optimization
variables = ["Ask","Bid","Strike","Delta","Gamma","Theta","Vega","Rho"]
for variable in variables:
cal_cont = getattr(self,"Call_"+variable)
put_cont = getattr(self,"Put_"+variable)
array = np.hstack((cal_cont[:,np.newaxis],put_cont[:,np.newaxis]))
setattr(self, variable+"_List" , [[ array[i,j] for j in range(array.shape[1])] for i in range(array.shape[0])])
"""
#######################################################################################
Strategy Class
#######################################################################################
"""
class Strategy(object):
def __init__(self,allocation,chain,name):
self.Option_Chain = chain
self.Call_allocation = allocation[:,0]
self.Put_allocation = allocation[:,-1]
self.loss_threshold = 1000
self.vec_final_pnl = np.vectorize(self.final_pnl)
self.e_pnl = self.expected_pnl()
# self.e_utility = self.expected_utility()
self.pnl_space = self.pnl_space()
self.Max_Profit = max(self.pnl_space)
self.Max_Loss = -1*(min(self.pnl_space))
self.sigma = self.Option_Chain.sigma
self.name = name
self.Prob_profit = self.prob_profit()
self.Prob_loss = self.prob_loss()
Greeks = ["Delta","Gamma","Theta","Vega","Rho"]
for greek in Greeks:
call_att = getattr(self.Option_Chain, "Call_"+greek)
put_att = getattr(self.Option_Chain, "Put_"+greek)
call_c = self.Option_Chain.lot_size*(np.sum(self.Call_allocation*call_att))
put_c = self.Option_Chain.lot_size*(np.sum(self.Put_allocation*put_att))
setattr(self,"Strategy_"+greek, call_c+put_c)
# self.plot_pnl()
def payoff(self,S_T):
call_payoff = self.Option_Chain.lot_size*(np.sum(self.Call_allocation*np.maximum((S_T-self.Option_Chain.Call_Strike),0)))
put_payoff = self.Option_Chain.lot_size*(np.sum(self.Put_allocation*np.maximum((self.Option_Chain.Put_Strike-S_T),0)))
final_payoff = call_payoff+put_payoff
return final_payoff
def initial_cost(self):
call_cost = self.Option_Chain.Call_Ask*((self.Call_allocation>0).astype(int))+self.Option_Chain.Call_Bid*((self.Call_allocation<=0).astype(int))
put_cost = self.Option_Chain.Put_Ask*((self.Put_allocation>0).astype(int))+self.Option_Chain.Put_Bid*((self.Put_allocation<=0).astype(int))
total_call_cost = np.sum(self.Option_Chain.lot_size*(self.Call_allocation*call_cost))
total_put_cost = np.sum(self.Option_Chain.lot_size*(self.Put_allocation*put_cost))
return total_call_cost+total_put_cost
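# Note added for clarity (not original commentary): the cost above follows the usual
# convention that long legs (positive allocation) are bought at the Ask and short legs
# (negative allocation) are sold at the Bid, scaled by the option lot size.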
def final_pnl(self,S_T):
return self.payoff(S_T)-self.initial_cost()
def plot_pnl(self):
S = np.linspace(self.Option_Chain.Stock_Last - 4*self.sigma, self.Option_Chain.Stock_Last + 4*self.sigma, 1000)
pnl = self.vec_final_pnl(S)
max_loss = round(min(pnl),2)
e_pnl = self.expected_pnl()
fig,ax = plt.subplots(1, 1, figsize=(9, 5))
plt.plot(S,pnl,lw=2.5, color='blue', label = "Final PnL as a function of $S_T$")
plt.axhline(y=0, color="black", lw = 1)
plt.axhline(y=max_loss, color="red",linestyle='--', lw = 1, label = "Lowest Pnl = {}".format(max_loss))
plt.axhline(y = e_pnl , color = "magenta", linestyle='--', lw =1, label = "$E(Profit) : {}$".format(e_pnl) )
plt.axvline(x = self.Option_Chain.Stock_Last, color="green", linestyle='--', label = "$S_0 : {}$".format(self.Option_Chain.Stock_Last))
fmt = '${x:,.2f}'
tick = mtick.StrMethodFormatter(fmt)
ax.yaxis.set_major_formatter(tick)
ax.xaxis.set_major_formatter(tick)
plt.xlabel('$S_T$')
plt.ylabel('Final P & L ')
ax.set_axisbelow(True)
ax.minorticks_on()
ax.grid(which='major', linestyle='-')
ax.grid(which='minor', linestyle=':')
plt.legend()
plt.grid(True)
ax2 = ax.twinx()
color = 'tab:blue'
mu = self.Option_Chain.Stock_Last
sigma = self.sigma
s = self.Option_Chain.lognormal_s
scale = self.Option_Chain.lognormal_scale
ax2.set_ylabel('Pdf', color=color) # we already handled the x-label with ax1
ax2.vlines(x = mu+sigma, ymin=0, ymax = stats.lognorm.pdf(mu+sigma,s,0,scale), color=color, linestyle='--', label = "$\pm \sigma $")
ax2.vlines(x = mu-sigma, ymin=0, ymax = stats.lognorm.pdf(mu-sigma,s,0,scale),color=color, linestyle='--', label = "$\pm \sigma $")
ax2.vlines(x = mu+2*sigma, ymin=0, ymax = stats.lognorm.pdf(mu+2*sigma,s,0,scale),color=color, linestyle='--', label = "$\pm 2\sigma $")
ax2.vlines(x = mu-2*sigma, ymin=0, ymax = stats.lognorm.pdf(mu-2*sigma,s,0,scale),color=color, linestyle='--', label = "$\pm 2\sigma $")
ax2.vlines(x = mu+3*sigma, ymin=0, ymax = stats.lognorm.pdf(mu+3*sigma,s,0,scale),color=color, linestyle='--', label = "$\pm 3\sigma $")
ax2.vlines(x = mu-3*sigma, ymin=0, ymax = stats.lognorm.pdf(mu-3*sigma,s,0,scale),color=color, linestyle='--', label = "$\pm 3\sigma $")
ax2.plot(S, stats.lognorm.pdf(S, s, 0, scale), color=color, linestyle='--', lw = 2, label = "PDF of $S_T$")
ax2.tick_params(axis='y', labelcolor=color)
self.figure = fig
def e_curve(self):
s_paths = self.Option_Chain.S_space
s_paths_density = self.Option_Chain.S_density
final_pnl = self.vec_final_pnl(s_paths)
curve = s_paths_density*final_pnl
return curve
def utility_curve(self):
s_paths = self.Option_Chain.S_space
s_paths_density = self.Option_Chain.S_density
final_pnl = self.vec_final_pnl(s_paths)
curve = s_paths_density*np.exp(final_pnl)
return curve
def expected_utility(self):
curve = self.utility_curve()
expected_util = (self.Option_Chain.S_space[1]-self.Option_Chain.S_space[0])*(sum(curve)-0.5*curve[0]-0.5*curve[-1])
return round(expected_util,2)
def expected_pnl(self):
curve = self.e_curve()
h = (self.Option_Chain.S_space[1]-self.Option_Chain.S_space[0])
expected_pnl = h*(sum(curve)-0.5*curve[0]-0.5*curve[-1])
return round(expected_pnl,2)
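# Note added for clarity (not original commentary): expected_pnl() is a trapezoidal
# approximation of E[PnL] = integral of pnl(S_T) * pdf(S_T) dS_T over the S_space grid,
# i.e. h * (sum(curve) - 0.5*curve[0] - 0.5*curve[-1]) with h the grid spacing;
# expected_utility() applies the same rule to exp(pnl) * pdf.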
def pnl_st(self):
return self.final_pnl(self.Option_Chain.Stock_Last)
def pnl_space(self):
return self.vec_final_pnl(self.Option_Chain.S_space)
def prob_profit(self):
S = self.Option_Chain.S_space
S_dens = self.Option_Chain.S_density
pnl_curve = self.pnl_space.copy()
S_dens_pos = S_dens.copy()
S_dens_pos[pnl_curve<=0] = 0
prob_profit = (S[1]-S[0])*(sum(S_dens_pos)- 0.5*S_dens_pos[0]-0.5*S_dens_pos[-1])
return round(100*prob_profit,2)
def prob_loss(self):
S = self.Option_Chain.S_space
S_dens = self.Option_Chain.S_density
pnl_curve = self.pnl_space.copy()
S_dens_neg = S_dens.copy()
S_dens_neg[pnl_curve>=0] = 0
prob_loss = (S[1]-S[0])*(sum(S_dens_neg)- 0.5*S_dens_neg[0]-0.5*S_dens_neg[-1])
return round(100*prob_loss, 2)
def summary(self):
strat_summary = dict()
strat_summary["Underlying"] = self.Option_Chain.Name
strat_summary["Name"] = self.name
strat_summary["Expected PnL"] = self.e_pnl
# strat_summary["Expected Utility"] = self.e_utility
strat_summary["Cost of Strategy"] = self.initial_cost()
strat_summary["Max_Profit"] = self.Max_Profit
strat_summary["Max_Loss"] = self.Max_Loss
strat_summary["Prob of Profit"] = self.Prob_profit
strat_summary["Prob of Loss"] = self.Prob_loss
strat_summary["Exp_Pnl/Max_Loss"]=self.e_pnl/self.Max_Loss
Greeks = ["Delta","Gamma","Theta","Vega","Rho"]
for greek in Greeks:
strat_summary["Strategy_"+greek] = getattr(self,"Strategy_"+greek)
Call_strikes = self.Option_Chain.Call_Strike
Put_strikes = self.Option_Chain.Put_Strike
for i in range(len(Call_strikes)):
strat_summary["C_"+str(Call_strikes[i])] = self.Call_allocation[i]
for i in range(len(Put_strikes)):
strat_summary["P_"+str(Put_strikes[i])] = self.Put_allocation[i]
return strat_summary
"""
#######################################################################################
Calculations
#######################################################################################
"""
Assets = list(final_dataset['Group'].unique())
All_Option_Chains = list()
for i in range(len(Assets)):
All_Option_Chains.append(Option_chain(Assets[i],final_dataset))
tic = datetime.now()
All_Strategies = list()
All_Strategies_Summary = list()
tic = datetime.now()
#for chain in All_Option_Chains:
for i in range(len(All_Option_Chains)):
chain = All_Option_Chains[i]
print("\n Processing ",i+1,"/",len(All_Option_Chains), "-", chain.Name, " : ", chain.Description)
Master_List_Strategies = list()
Master_List_Strategy_Summary = pd.DataFrame()
#chain = Option_chain(Assets[2],final_dataset)
if "Bull Call Spread" in Strategies:
Strategy_name ="Bull Call Spread"
print("\t Processing ", Strategy_name, " Strategy")
bull_call_spread_strat = list()
bull_call_spread = list()
if mode == "all_combinations":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
call_2_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos,call_1_quantity,call_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1, quan_2 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = quan_1
allocation[pos_2,0] = quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_call_spread_strat.append(strat)
bull_call_spread.append(details)
elif mode=="rule_based":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos,call_1_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1= t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = quan_1
allocation[pos_2,0] = -1*quan_1
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_call_spread_strat.append(strat)
bull_call_spread.append(details)
if len(bull_call_spread)>0:
bull_call_spread_df = pd.DataFrame(bull_call_spread)
bull_call_spread_df = bull_call_spread_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bull_call_spread_df)
Master_List_Strategies.append(bull_call_spread_strat)
print("\t \t Added ", len(bull_call_spread), " Strategies")
if "Bear Call Spread" in Strategies:
Strategy_name = "Bear Call Spread"
print("\t Processing ", Strategy_name, " Strategy")
bear_call_spread_strat = list()
bear_call_spread = list()
if mode == "all_combinations":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
call_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos,call_1_quantity,call_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1, quan_2 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = quan_1
allocation[pos_2,0] = quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_call_spread_strat.append(strat)
bear_call_spread.append(details)
elif mode=="rule_based":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos,call_1_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = -1*quan_1
allocation[pos_2,0] = quan_1
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_call_spread_strat.append(strat)
bear_call_spread.append(details)
if len(bear_call_spread)>0:
bear_call_spread_df = pd.DataFrame(bear_call_spread)
bear_call_spread_df = bear_call_spread_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bear_call_spread_df)
Master_List_Strategies.append(bear_call_spread_strat)
print("\t \t Added ", len(bear_call_spread), " Strategies")
if "Bull Put Spread" in Strategies:
Strategy_name ="Bull Put Spread"
print("\t Processing ", Strategy_name, " Strategy")
bull_put_spread_strat = list()
bull_put_spread = list()
if mode=="all_combinations":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
put_2_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos,put_1_quantity,put_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1, quan_2 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = quan_1
allocation[pos_2,1] = quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_put_spread_strat.append(strat)
bull_put_spread.append(details)
elif mode=="rule_based":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos,put_1_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = quan_1
allocation[pos_2,1] = -1*quan_1
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_put_spread_strat.append(strat)
bull_put_spread.append(details)
if len(bull_put_spread)>0:
bull_put_spread_df = pd.DataFrame(bull_put_spread)
bull_put_spread_df = bull_put_spread_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bull_put_spread_df)
Master_List_Strategies.append(bull_put_spread_strat)
print("\t \t Added ", len(bull_put_spread), " Strategies")
if "Bear Put Spread" in Strategies:
Strategy_name ="Bear Put Spread"
print("\t Processing ", Strategy_name, " Strategy")
bear_put_spread_strat = list()
bear_put_spread = list()
if mode=="all_combinations":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
put_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos,put_1_quantity,put_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1, quan_2 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = quan_1
allocation[pos_2,1] = quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_put_spread_strat.append(strat)
bear_put_spread.append(details)
elif mode=="rule_based":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos,put_1_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, quan_1 = t
if pos_1<pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = -1*quan_1
allocation[pos_2,1] = quan_1
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_put_spread_strat.append(strat)
bear_put_spread.append(details)
if len(bear_put_spread)>0:
bear_put_spread_df = pd.DataFrame(bear_put_spread)
bear_put_spread_df = bear_put_spread_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bear_put_spread_df)
Master_List_Strategies.append(bear_put_spread_strat)
print("\t \t Added ", len(bear_put_spread), " Strategies")
## TODO: Add Call/Put Back/Ratio Spreads
## TODO: Add Other 2 ladders
if "Bull Put Ladder" in Strategies:
Strategy_name ="Bull Put Ladder"
print("\t Processing ", Strategy_name, " Strategy")
bull_put_ladder_strat = list()
bull_put_ladder = list()
if mode=="all_combinations":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_3_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
put_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
put_3_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos, put_3_pos, put_1_quantity,put_2_quantity, put_3_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, pos_3, quan_1, quan_2, quan_3 = t
if pos_1<pos_2 and pos_2 < pos_3 :
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = quan_1
allocation[pos_2,1] = quan_2
allocation[pos_3,1] = quan_3
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_put_ladder_strat.append(strat)
bull_put_ladder.append(details)
elif mode=="rule_based":
put_1_pos = list(np.arange(chain.Call_total))
put_2_pos = list(np.arange(chain.Call_total))
put_3_pos = list(np.arange(chain.Call_total))
put_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
put_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [put_1_pos,put_2_pos, put_3_pos, put_1_quantity,put_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, pos_3, quan_1, quan_2 = t
if pos_1<pos_2 and pos_2 < pos_3 :
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,1] = quan_1
allocation[pos_2,1] = quan_1
allocation[pos_3,1] = -1*quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bull_put_ladder_strat.append(strat)
bull_put_ladder.append(details)
if len(bull_put_ladder)>0:
bull_put_ladder_df = pd.DataFrame(bull_put_ladder)
bull_put_ladder_df = bull_put_ladder_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bull_put_ladder_df)
Master_List_Strategies.append(bull_put_ladder_strat)
print("\t \t Added ", len(bull_put_ladder), " Strategies")
if "Bear Call Ladder" in Strategies:
Strategy_name ="Bear Call Ladder"
print("\t Processing ", Strategy_name, " Strategy")
bear_call_ladder_strat = list()
bear_call_ladder = list()
if mode=="all_combinations":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_3_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(-1*np.arange(1,max_quantity_per_leg+1))
call_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
call_3_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos, call_3_pos, call_1_quantity,call_2_quantity, call_3_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, pos_3, quan_1, quan_2, quan_3 = t
if pos_1<pos_2 and pos_2 < pos_3 :
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = quan_1
allocation[pos_2,0] = quan_2
allocation[pos_3,0] = quan_3
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_call_ladder_strat.append(strat)
bear_call_ladder.append(details)
elif mode=="rule_based":
call_1_pos = list(np.arange(chain.Call_total))
call_2_pos = list(np.arange(chain.Call_total))
call_3_pos = list(np.arange(chain.Call_total))
call_1_quantity = list(np.arange(1,max_quantity_per_leg+1))
call_2_quantity = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_1_pos,call_2_pos, call_3_pos, call_1_quantity,call_2_quantity]
for t in itertools.product(*iterables):
pos_1, pos_2, pos_3, quan_1, quan_2= t
if pos_1<pos_2 and pos_2 < pos_3 :
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = -1*quan_1
allocation[pos_2,0] = quan_2
allocation[pos_3,0] = quan_2
strat = Strategy(allocation,chain,Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
bear_call_ladder_strat.append(strat)
bear_call_ladder.append(details)
if len(bear_call_ladder)>0:
bear_call_ladder_df = pd.DataFrame(bear_call_ladder)
bear_call_ladder_df = bear_call_ladder_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(bear_call_ladder_df)
Master_List_Strategies.append(bear_call_ladder_strat)
print("\t \t Added ", len(bear_call_ladder), " Strategies")
if "Long Straddle" in Strategies:
Strategy_name = "Long Straddle"
print("\t Processing ", Strategy_name, " Strategy")
pos = list(np.arange(chain.Call_total))
quan = list(np.arange(1,max_quantity_per_leg+1))
iterables = [pos,quan]
long_straddle_strat = list()
long_straddle = list()
for t in itertools.product(*iterables):
pos,quan_1 = t
allocation = np.zeros((chain.Call_total,2))
allocation[pos,0] = quan_1
allocation[pos,1] = quan_1
strat = Strategy(allocation, chain, Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
long_straddle_strat.append(strat)
long_straddle.append(details)
if len(long_straddle)>0:
long_straddle_df = pd.DataFrame(long_straddle)
long_straddle_df = long_straddle_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(long_straddle_df)
Master_List_Strategies.append(long_straddle_strat)
print("\t \t Added ", len(long_straddle), " Strategies")
if "Long Strap" in Strategies:
Strategy_name = "Long Strap"
print("\t Processing ", Strategy_name, " Strategy")
pos = list(np.arange(chain.Call_total))
call_quan = list(np.arange(1,max_quantity_per_leg+1))
put_quan = list(np.arange(1,max_quantity_per_leg+1))
iterables = [pos,call_quan, put_quan]
long_strap_strat = list()
long_strap = list()
for t in itertools.product(*iterables):
pos,quan_1, quan_2 = t
if quan_1 > quan_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos,0] = quan_1
allocation[pos,1] = quan_2
strat = Strategy(allocation, chain, Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
long_strap_strat.append(strat)
long_strap.append(details)
if len(long_strap)>0:
long_strap_df = pd.DataFrame(long_strap)
long_strap_df = long_strap_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(long_strap_df)
Master_List_Strategies.append(long_strap_strat)
print("\t \t Added ", len(long_strap), " Strategies")
if "Long Strip" in Strategies:
Strategy_name = "Long Strip"
print("\t Processing ", Strategy_name, " Strategy")
pos = list(np.arange(chain.Call_total))
call_quan = list(np.arange(1,max_quantity_per_leg+1))
put_quan = list(np.arange(1,max_quantity_per_leg+1))
iterables = [pos,call_quan, put_quan]
long_strip_strat = list()
long_strip = list()
for t in itertools.product(*iterables):
pos,quan_1, quan_2 = t
if quan_1 < quan_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos,0] = quan_1
allocation[pos,1] = quan_2
strat = Strategy(allocation, chain, Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
long_strip_strat.append(strat)
long_strip.append(details)
if len(long_strip)>0:
long_strip_df = pd.DataFrame(long_strip)
long_strip_df = long_strip_df.sort_values(by=["Exp_Pnl/Max_Loss","Max_Profit"], ascending=False)
Master_List_Strategy_Summary = Master_List_Strategy_Summary.append(long_strip_df)
Master_List_Strategies.append(long_strip_strat)
print("\t \t Added ", len(long_strip), " Strategies")
if "Long Strangle" in Strategies:
Strategy_name = "Long Strangle"
print("\t Processing ", Strategy_name, " Strategy")
call_pos = list(np.arange(chain.Call_total))
put_pos = list(np.arange(chain.Call_total))
quan = list(np.arange(1,max_quantity_per_leg+1))
iterables = [call_pos,put_pos, quan]
long_strangle_strat = list()
long_strangle = list()
for t in itertools.product(*iterables):
pos_1,pos_2,quan_1 = t
if pos_1 < pos_2:
allocation = np.zeros((chain.Call_total,2))
allocation[pos_1,0] = quan_1
allocation[pos_2,1] = quan_1
strat = Strategy(allocation, chain, Strategy_name)
details = strat.summary()
if details["Expected PnL"] > min_e_pnl and details["Prob of Profit"]>min_p_profit and details["Cost of Strategy"] <max_cost and details["Max_Loss"]<max_loss :
long_strangle_strat.append(strat)
long_strangle.append(details)
if len(long_strangle)>0:
long_strangle_df = | pd.DataFrame(long_strangle) | pandas.DataFrame |
__author__ = 'saeedamen' # <NAME>
#
# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
from financepy.market.curves.FinDiscountCurveFlat import FinDiscountCurveFlat
from financepy.market.volatility.FinFXVolSurface import FinFXVolSurface
from financepy.market.volatility.FinFXVolSurface import FinFXATMMethod
from financepy.market.volatility.FinFXVolSurface import FinFXDeltaMethod
from financepy.market.volatility.FinFXVolSurface import volFunctionFAST
from financepy.market.volatility.FinFXVolSurface import FinVolFunctionTypes
from findatapy.util.dataconstants import DataConstants
from finmarketpy.curve.volatility.abstractvolsurface import AbstractVolSurface
from finmarketpy.util.marketconstants import MarketConstants
from finmarketpy.util.marketutil import MarketUtil
data_constants = DataConstants()
market_constants = MarketConstants()
import pandas as pd
class FXVolSurface(AbstractVolSurface):
"""Holds data for an FX vol surface and also interpolates vol surface, converts strikes to implied_vol vols etc.
"""
def __init__(self, market_df=None, asset=None, field='close', tenors=data_constants.fx_vol_tenor):
self._market_df = market_df
self._tenors = tenors
self._asset = asset
self._field = field
self._market_util = MarketUtil()
self._value_date = None
self._fin_fx_vol_surface = None
self._df_vol_dict = None
self._vol_function_type = FinVolFunctionTypes.CLARKE.value
def build_vol_surface(self, value_date, asset=None, depo_tenor='1M', field=None, atm_method=FinFXATMMethod.FWD_DELTA_NEUTRAL,
delta_method=FinFXDeltaMethod.SPOT_DELTA):
"""Builds the implied_vol volatility for a particular value date and calculates the benchmark strikes etc.
Before we do any sort of interpolation later, we need to build the implied_vol vol surface.
Parameters
----------
value_date : str
Value data (need to have market data for this date)
asset : str
Asset name
depo_tenor : str
Depo tenor to use
default - '1M'
field : str
Market data field to use
default - 'close'
atm_method : FinFXATMMethod
How is the ATM quoted? Eg. delta neutral, ATMF etc.
default - FinFXATMMethod.FWD_DELTA_NEUTRAL
delta_method : FinFXDeltaMethod
Spot delta, forward delta etc.
default - FinFXDeltaMethod.SPOT_DELTA
"""
value_date = self._market_util.parse_date(value_date)
self._value_date = value_date
market_df = self._market_df
value_fin_date = self._findate(self._market_util.parse_date(value_date))
tenors = self._tenors
# Change ON (overnight) to 1D (convention for financepy)
# tenors_financepy = list(map(lambda b: b.replace("ON", "1D"), self._tenors.copy()))
tenors_financepy = self._tenors.copy()
if field is None:
field = self._field
field = '.' + field
if asset is None:
asset = self._asset
for_name_base = asset[0:3]
dom_name_terms = asset[3:6]
notional_currency = for_name_base
date_index = market_df.index == value_date
# CAREFUL: need to divide by 100 for depo rate, ie. 0.0346 = 3.46%
forCCRate = market_df[for_name_base + depo_tenor + field][date_index].values[0] / 100.0 # 0.03460 # EUR
domCCRate = market_df[dom_name_terms + depo_tenor + field][date_index].values[0] / 100.0 # 0.02940 # USD
currency_pair = for_name_base + dom_name_terms
spot_fx_rate = float(market_df[currency_pair + field][date_index].values[0])
# For vols we do NOT need to divide by 100 (financepy does that internally)
atm_vols = market_df[[currency_pair + "V" + t + field for t in tenors]][date_index].values[0]
market_strangle25DeltaVols = market_df[[currency_pair + "25B" + t + field for t in tenors]][date_index].values[0] #[0.65, 0.75, 0.85, 0.90, 0.95, 0.85]
risk_reversal25DeltaVols = market_df[[currency_pair + "25R" + t + field for t in tenors]][date_index].values[0] #[-0.20, -0.25, -0.30, -0.50, -0.60, -0.562]
market_strangle10DeltaVols = market_df[[currency_pair + "10B" + t + field for t in tenors]][date_index].values[0]
risk_reversal10DeltaVols = market_df[[currency_pair + "10R" + t + field for t in tenors]][date_index].values[0]
dom_discount_curve = FinDiscountCurveFlat(value_fin_date, domCCRate)
for_discount_curve = FinDiscountCurveFlat(value_fin_date, forCCRate)
use_only_25d = True
# Construct financepy vol surface (uses polynomial interpolation for determining vol between strikes)
if use_only_25d:
self._fin_fx_vol_surface = FinFXVolSurface(value_fin_date,
spot_fx_rate,
currency_pair,
notional_currency,
dom_discount_curve,
for_discount_curve,
tenors_financepy,
atm_vols,
market_strangle25DeltaVols,
risk_reversal25DeltaVols,
atm_method,
delta_method)
else:
# New implementation in FinancePy also uses 10d for interpolation
from financepy.market.volatility.FinFXVolSurfacePlus import FinFXVolSurfacePlus
self._fin_fx_vol_surface = FinFXVolSurfacePlus(value_fin_date,
spot_fx_rate,
currency_pair,
notional_currency,
dom_discount_curve,
for_discount_curve,
tenors_financepy,
atm_vols,
market_strangle25DeltaVols,
risk_reversal25DeltaVols,
market_strangle10DeltaVols,
risk_reversal10DeltaVols,
atm_method,
delta_method)
def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor='1M'):
"""Calculates the implied_vol volatility for a given strike
Parameters
----------
K : float
            Strike for which to find the implied volatility
expiry_date : str (optional)
Expiry date of option (TODO not implemented)
tenor : str (optional)
Tenor of option
default - '1M'
        Returns
        -------
        float or None
            The implied volatility for the strike, or None if it cannot be computed
        """
# TODO interpolate for broken dates, not just quoted tenors
if tenor is not None:
try:
tenor_index = self._get_tenor_index(tenor)
return self.vol_function(K, tenor_index)
except:
pass
return None
def extract_vol_surface(self, num_strike_intervals=60):
"""Creates an interpolated implied vol surface which can be plotted (in strike space), and also in delta
space for key strikes (ATM, 25d call and put). Also for key strikes converts from delta to strike space.
Parameters
----------
num_strike_intervals : int
Number of points to interpolate
Returns
-------
dict
"""
## Modified from FinancePy code for plotting vol curves
# columns = tenors
df_vol_surface_strike_space = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
df_vol_surface_delta_space = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# columns = tenors
df_vol_surface_implied_pdf = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# Conversion between main deltas and strikes
df_deltas_vs_strikes = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# ATM, 25d market strangle and 25d risk reversals
df_vol_surface_quoted_points = pd.DataFrame(columns=self._fin_fx_vol_surface._tenors)
# Note, at present we're not using 10d strikes
quoted_strikes_names = ['ATM', 'STR_25D_MS', 'RR_25D_P']
key_strikes_names = ['K_25D_P', 'K_25D_P_MS', 'ATM', 'K_25D_C', 'K_25D_C_MS']
# Get max/min strikes to interpolate (from the longest dated tenor)
low_K = self._fin_fx_vol_surface._K_25D_P[-1] * 0.95
high_K = self._fin_fx_vol_surface._K_25D_C[-1] * 1.05
# In case using old version of FinancePy
try:
implied_pdf_fin_distribution = self._fin_fx_vol_surface.impliedDbns(low_K, high_K, num_strike_intervals)
except:
pass
for tenor_index in range(0, self._fin_fx_vol_surface._numVolCurves):
# Get the quoted vol points
tenor_label = self._fin_fx_vol_surface._tenors[tenor_index]
atm_vol = self._fin_fx_vol_surface._atmVols[tenor_index] * 100
ms_25d_vol = self._fin_fx_vol_surface._mktStrangle25DeltaVols[tenor_index] * 100
            rr_25d_vol = self._fin_fx_vol_surface._riskReversal25DeltaVols[tenor_index] * 100
            df_vol_surface_quoted_points[tenor_label] = pd.Series(index=quoted_strikes_names, data=[atm_vol, ms_25d_vol, rr_25d_vol])
            # Do interpolation in strike space for the implied vols
strikes = []
vols = []
if num_strike_intervals is not None:
K = low_K
dK = (high_K - low_K) / num_strike_intervals
for i in range(0, num_strike_intervals):
sigma = self.vol_function(K, tenor_index) * 100.0
strikes.append(K)
vols.append(sigma)
K = K + dK
df_vol_surface_strike_space[tenor_label] = pd.Series(index=strikes, data=vols)
try:
df_vol_surface_implied_pdf[tenor_label] = pd.Series(index=implied_pdf_fin_distribution[tenor_index]._x,
data=implied_pdf_fin_distribution[tenor_index]._densitydx)
except:
pass
# Extract strikes for the quoted points (ie. 25d and ATM)
key_strikes = []
key_strikes.append(self._fin_fx_vol_surface._K_25D_P[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_P_MS[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_ATM[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_C[tenor_index])
key_strikes.append(self._fin_fx_vol_surface._K_25D_C_MS[tenor_index])
df_deltas_vs_strikes[tenor_label] = | pd.Series(index=key_strikes_names, data=key_strikes) | pandas.Series |
import warnings
warnings.simplefilter(action = 'ignore', category = UserWarning)
# Front matter
import os
import glob
import re
import pandas as pd
import numpy as np
import scipy.constants as constants
import sympy as sp
from sympy import Matrix, Symbol
from sympy.utilities.lambdify import lambdify
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib import gridspec
# Seaborn, useful for graphics
import seaborn as sns
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('ytick', labelsize=14)
rc = {'lines.linewidth': 1,
'axes.labelsize': 20,
'axes.titlesize': 20,
'legend.fontsize': 26,
'xtick.direction': u'in',
'ytick.direction': u'in'}
sns.set_style('ticks', rc=rc)
# Functions
def calc_V_bcc(a):
return a**3
def calc_V_hcp(a,c):
return (np.sqrt(3)/2)*a**2*c
def calc_dV_bcc(a,da):
return 3*a**2*da
def calc_dV_hcp(a,c,da,dc):
return np.sqrt( (np.sqrt(3)*a*c*da)**2 + ((np.sqrt(3)/2)*a**2*dc)**2 )
# Numeric Vinet EOS, used for everything except calculating dP
def VinetEOS(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * np.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Symbolic Vinet EOS, needed to calculate dP
def VinetEOS_sym(V,V0,K0,Kprime0):
A = V/V0
P = 3*K0*A**(-2/3) * (1-A**(1/3)) * sp.exp((3/2)*(Kprime0-1)*(1-A**(1/3)))
return P
# Create a covariance matrix from EOS_df with V0, K0, and K0prime; used to get dP
def getCov3(EOS_df, phase):
    dV0 = float(EOS_df[EOS_df['Phase'] == phase]['dV0'])
    dK0 = float(EOS_df[EOS_df['Phase'] == phase]['dK0'])
    dKprime0 = float(EOS_df[EOS_df['Phase'] == phase]['dKprime0'])
    V0K0_corr = float(EOS_df[EOS_df['Phase'] == phase]['V0K0 corr'])
    V0Kprime0_corr = float(EOS_df[EOS_df['Phase'] == phase]['V0Kprime0 corr'])
    K0Kprime0_corr = float(EOS_df[EOS_df['Phase'] == phase]['K0Kprime0 corr'])
corr_matrix = np.eye(3)
corr_matrix[0,1] = V0K0_corr
corr_matrix[1,0] = V0K0_corr
corr_matrix[0,2] = V0Kprime0_corr
corr_matrix[2,0] = V0Kprime0_corr
corr_matrix[1,2] = K0Kprime0_corr
corr_matrix[2,1] = K0Kprime0_corr
sigmas = np.array([[dV0,dK0,dKprime0]])
cov = (sigmas.T@sigmas)*corr_matrix
return cov
# Create a covariance matrix with V, V0, K0, and K0prime; used to get dP
def getVinetCov(dV, EOS_df, phase):
cov3 = getCov3(EOS_df, phase)
cov = np.eye(4)
cov[1:4,1:4] = cov3
cov[0,0] = dV**2
return cov
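# Propagate the parameter uncertainties through the Vinet EOS with the delta
# method: var(P) ~= J^T @ cov @ J, where J is the Jacobian of P with respect to
# (V, V0, K0, K'0) and cov is their covariance matrix.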
def calc_dP_VinetEOS(V, dV, EOS_df, phase):
# Create function for Jacobian of Vinet EOS
a,b,c,d = Symbol('a'),Symbol('b'),Symbol('c'),Symbol('d') # Symbolic variables V, V0, K0, K'0
Vinet_matrix = Matrix([VinetEOS_sym(a,b,c,d)]) # Create a symbolic Vinet EOS matrix
param_matrix = Matrix([a,b,c,d]) # Create a matrix of symbolic variables
# Symbolically take the Jacobian of the Vinet EOS and turn into a column matrix
J_sym = Vinet_matrix.jacobian(param_matrix).T
# Create a numpy function for the above expression
# (easier to work with numerically)
J_Vinet = lambdify((a,b,c,d), J_sym, 'numpy')
J = J_Vinet(V,*getEOSparams(EOS_df, phase)) # Calculate Jacobian
cov = getVinetCov(dV, EOS_df, phase) # Calculate covariance matrix
    dP = np.sqrt((J.T@cov@J).item()) # 1-sigma uncertainty: sqrt of J^T.cov.J, as a scalar
return dP
def getEOSparams(EOS_df, phase):
    V0 = float(EOS_df[EOS_df['Phase'] == phase]['V0'])
    K0 = float(EOS_df[EOS_df['Phase'] == phase]['K0'])
    Kprime0 = float(EOS_df[EOS_df['Phase'] == phase]['Kprime0'])
return V0, K0, Kprime0
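# Density from the unit-cell volume V (cubic angstroms); the factor of 2
# presumably reflects the 2 atoms per bcc/hcp unit cell when converting to a
# molar volume with Avogadro's number.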
def calc_rho(V,dV,M):
# Convert from cubic angstroms to cm^3/mol
V_ccpermol = (V/2)*constants.N_A/(10**24)
rho = M/V_ccpermol
drho = (M*2*10**24/constants.N_A)*(dV/(V**2))
return rho, drho
# Import EOS information
EOS_df = pd.read_csv('FeAlloyEOS.csv')
# Find the filepath of all .xy XRD pattern files
patternfilepath_list = [filepath for filepath in glob.glob('*/*/*.xy')]
allresults_df = pd.DataFrame()
bccFe_df = | pd.DataFrame() | pandas.DataFrame |
from collections import deque
from datetime import datetime
import operator
import numpy as np
import pytest
import pytz
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import _check_mixed_float, _check_mixed_int
# -------------------------------------------------------------------
# Comparisons
class TestFrameComparisons:
# Specifically _not_ flex-comparisons
def test_frame_in_list(self):
# GH#12689 this should raise at the DataFrame level, not blocks
df = pd.DataFrame(np.random.randn(6, 4), columns=list("ABCD"))
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df in [None]
def test_comparison_invalid(self):
def check(df, df2):
for (x, y) in [(df, df2), (df2, df)]:
# we expect the result to match Series comparisons for
# == and !=, inequalities should raise
result = x == y
expected = pd.DataFrame(
{col: x[col] == y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
result = x != y
expected = pd.DataFrame(
{col: x[col] != y[col] for col in x.columns},
index=x.index,
columns=x.columns,
)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError):
x >= y
with pytest.raises(TypeError):
x > y
with pytest.raises(TypeError):
x < y
with pytest.raises(TypeError):
x <= y
# GH4968
# invalid date/int comparisons
df = pd.DataFrame(np.random.randint(10, size=(10, 1)), columns=["a"])
df["dates"] = pd.date_range("20010101", periods=len(df))
df2 = df.copy()
df2["dates"] = df["a"]
check(df, df2)
df = pd.DataFrame(np.random.randint(10, size=(10, 2)), columns=["a", "b"])
df2 = pd.DataFrame(
{
"a": pd.date_range("20010101", periods=len(df)),
"b": pd.date_range("20100101", periods=len(df)),
}
)
check(df, df2)
def test_timestamp_compare(self):
# make sure we can compare Timestamps on the right AND left hand side
# GH#4982
df = pd.DataFrame(
{
"dates1": pd.date_range("20010101", periods=10),
"dates2": pd.date_range("20010102", periods=10),
"intcol": np.random.randint(1000000000, size=10),
"floatcol": np.random.randn(10),
"stringcol": list(tm.rands(10)),
}
)
df.loc[np.random.rand(len(df)) > 0.5, "dates2"] = pd.NaT
ops = {"gt": "lt", "lt": "gt", "ge": "le", "le": "ge", "eq": "eq", "ne": "ne"}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# no nats
if left in ["eq", "ne"]:
expected = left_f(df, pd.Timestamp("20010109"))
result = right_f(pd.Timestamp("20010109"), df)
tm.assert_frame_equal(result, expected)
else:
with pytest.raises(TypeError):
left_f(df, pd.Timestamp("20010109"))
with pytest.raises(TypeError):
right_f(pd.Timestamp("20010109"), df)
# nats
expected = left_f(df, pd.Timestamp("nat"))
result = right_f(pd.Timestamp("nat"), df)
tm.assert_frame_equal(result, expected)
def test_mixed_comparison(self):
# GH#13128, GH#22163 != datetime64 vs non-dt64 should be False,
# not raise TypeError
# (this appears to be fixed before GH#22163, not sure when)
df = pd.DataFrame([["1989-08-01", 1], ["1989-08-01", 2]])
other = pd.DataFrame([["a", "b"], ["c", "d"]])
result = df == other
assert not result.any().any()
result = df != other
assert result.all().all()
def test_df_boolean_comparison_error(self):
# GH#4576, GH#22880
# comparing DataFrame against list/tuple with len(obj) matching
# len(df.columns) is supported as of GH#22800
df = pd.DataFrame(np.arange(6).reshape((3, 2)))
expected = pd.DataFrame([[False, False], [True, False], [False, False]])
result = df == (2, 2)
tm.assert_frame_equal(result, expected)
result = df == [2, 2]
tm.assert_frame_equal(result, expected)
def test_df_float_none_comparison(self):
df = pd.DataFrame(
np.random.randn(8, 3), index=range(8), columns=["A", "B", "C"]
)
result = df.__eq__(None)
assert not result.any().any()
def test_df_string_comparison(self):
df = pd.DataFrame([{"a": 1, "b": "foo"}, {"a": 2, "b": "bar"}])
mask_a = df.a > 1
tm.assert_frame_equal(df[mask_a], df.loc[1:1, :])
tm.assert_frame_equal(df[-mask_a], df.loc[0:0, :])
mask_b = df.b == "foo"
tm.assert_frame_equal(df[mask_b], df.loc[0:0, :])
tm.assert_frame_equal(df[-mask_b], df.loc[1:1, :])
class TestFrameFlexComparisons:
# TODO: test_bool_flex_frame needs a better name
def test_bool_flex_frame(self):
data = np.random.randn(5, 3)
other_data = np.random.randn(5, 3)
df = pd.DataFrame(data)
other = pd.DataFrame(other_data)
ndim_5 = np.ones(df.shape + (1, 3))
# Unaligned
def _check_unaligned_frame(meth, op, df, other):
part_o = other.loc[3:, 1:].copy()
rs = meth(part_o)
xp = op(df, part_o.reindex(index=df.index, columns=df.columns))
tm.assert_frame_equal(rs, xp)
# DataFrame
assert df.eq(df).values.all()
assert not df.ne(df).values.any()
for op in ["eq", "ne", "gt", "lt", "ge", "le"]:
f = getattr(df, op)
o = getattr(operator, op)
# No NAs
tm.assert_frame_equal(f(other), o(df, other))
_check_unaligned_frame(f, o, df, other)
# ndarray
tm.assert_frame_equal(f(other.values), o(df, other.values))
# scalar
tm.assert_frame_equal(f(0), o(df, 0))
# NAs
msg = "Unable to coerce to Series/DataFrame"
tm.assert_frame_equal(f(np.nan), o(df, np.nan))
with pytest.raises(ValueError, match=msg):
f(ndim_5)
# Series
def _test_seq(df, idx_ser, col_ser):
idx_eq = df.eq(idx_ser, axis=0)
col_eq = df.eq(col_ser)
idx_ne = df.ne(idx_ser, axis=0)
col_ne = df.ne(col_ser)
tm.assert_frame_equal(col_eq, df == pd.Series(col_ser))
tm.assert_frame_equal(col_eq, -col_ne)
tm.assert_frame_equal(idx_eq, -idx_ne)
tm.assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)
tm.assert_frame_equal(col_eq, df.eq(list(col_ser)))
tm.assert_frame_equal(idx_eq, df.eq(pd.Series(idx_ser), axis=0))
tm.assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))
idx_gt = df.gt(idx_ser, axis=0)
col_gt = df.gt(col_ser)
idx_le = df.le(idx_ser, axis=0)
col_le = df.le(col_ser)
tm.assert_frame_equal(col_gt, df > pd.Series(col_ser))
tm.assert_frame_equal(col_gt, -col_le)
tm.assert_frame_equal(idx_gt, -idx_le)
tm.assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)
idx_ge = df.ge(idx_ser, axis=0)
col_ge = df.ge(col_ser)
idx_lt = df.lt(idx_ser, axis=0)
col_lt = df.lt(col_ser)
tm.assert_frame_equal(col_ge, df >= pd.Series(col_ser))
tm.assert_frame_equal(col_ge, -col_lt)
tm.assert_frame_equal(idx_ge, -idx_lt)
tm.assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)
idx_ser = pd.Series(np.random.randn(5))
col_ser = pd.Series(np.random.randn(3))
_test_seq(df, idx_ser, col_ser)
# list/tuple
_test_seq(df, idx_ser.values, col_ser.values)
# NA
df.loc[0, 0] = np.nan
rs = df.eq(df)
assert not rs.loc[0, 0]
rs = df.ne(df)
assert rs.loc[0, 0]
rs = df.gt(df)
assert not rs.loc[0, 0]
rs = df.lt(df)
assert not rs.loc[0, 0]
rs = df.ge(df)
assert not rs.loc[0, 0]
rs = df.le(df)
assert not rs.loc[0, 0]
def test_bool_flex_frame_complex_dtype(self):
# complex
arr = np.array([np.nan, 1, 6, np.nan])
arr2 = np.array([2j, np.nan, 7, None])
df = pd.DataFrame({"a": arr})
df2 = pd.DataFrame({"a": arr2})
msg = "|".join(
[
"'>' not supported between instances of '.*' and 'complex'",
r"unorderable types: .*complex\(\)", # PY35
]
)
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df.gt(df2)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df["a"].gt(df2["a"])
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df.values > df2.values
rs = df.ne(df2)
assert rs.values.all()
arr3 = np.array([2j, np.nan, None])
df3 = pd.DataFrame({"a": arr3})
with pytest.raises(TypeError, match=msg):
# inequalities are not well-defined for complex numbers
df3.gt(2j)
with pytest.raises(TypeError, match=msg):
# regression test that we get the same behavior for Series
df3["a"].gt(2j)
with pytest.raises(TypeError, match=msg):
# Check that we match numpy behavior here
df3.values > 2j
def test_bool_flex_frame_object_dtype(self):
# corner, dtype=object
df1 = pd.DataFrame({"col": ["foo", np.nan, "bar"]})
df2 = pd.DataFrame({"col": ["foo", datetime.now(), "bar"]})
result = df1.ne(df2)
exp = pd.DataFrame({"col": [False, True, False]})
tm.assert_frame_equal(result, exp)
def test_flex_comparison_nat(self):
# GH 15697, GH 22163 df.eq(pd.NaT) should behave like df == pd.NaT,
# and _definitely_ not be NaN
df = pd.DataFrame([pd.NaT])
result = df == pd.NaT
# result.iloc[0, 0] is a np.bool_ object
assert result.iloc[0, 0].item() is False
result = df.eq(pd.NaT)
assert result.iloc[0, 0].item() is False
result = df != pd.NaT
assert result.iloc[0, 0].item() is True
result = df.ne(pd.NaT)
assert result.iloc[0, 0].item() is True
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types(self, opname):
# GH 15077, non-empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
result = getattr(df, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_df_flex_cmp_constant_return_types_empty(self, opname):
# GH 15077 empty DataFrame
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
const = 2
empty = df.iloc[:0]
result = getattr(empty, opname)(const).dtypes.value_counts()
tm.assert_series_equal(result, pd.Series([2], index=[np.dtype(bool)]))
# -------------------------------------------------------------------
# Arithmetic
class TestFrameFlexArithmetic:
def test_floordiv_axis0(self):
# make sure we df.floordiv(ser, axis=0) matches column-wise result
arr = np.arange(3)
ser = pd.Series(arr)
df = pd.DataFrame({"A": ser, "B": ser})
result = df.floordiv(ser, axis=0)
expected = pd.DataFrame({col: df[col] // ser for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = df.floordiv(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
@pytest.mark.slow
@pytest.mark.parametrize("opname", ["floordiv", "pow"])
def test_floordiv_axis0_numexpr_path(self, opname):
# case that goes through numexpr and has to fall back to masked_arith_op
op = getattr(operator, opname)
arr = np.arange(10 ** 6).reshape(100, -1)
df = pd.DataFrame(arr)
df["C"] = 1.0
ser = df[0]
result = getattr(df, opname)(ser, axis=0)
expected = pd.DataFrame({col: op(df[col], ser) for col in df.columns})
tm.assert_frame_equal(result, expected)
result2 = getattr(df, opname)(ser.values, axis=0)
tm.assert_frame_equal(result2, expected)
def test_df_add_td64_columnwise(self):
# GH 22534 Check that column-wise addition broadcasts correctly
dti = pd.date_range("2016-01-01", periods=10)
tdi = pd.timedelta_range("1", periods=10)
tser = | pd.Series(tdi) | pandas.Series |
#/*##########################################################################
# Copyright (C) 2020-2021 The University of Lorraine - France
#
# This file is part of the LIBS-ANN toolkit developed at the GeoRessources
# Laboratory of the University of Lorraine, France.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
import matplotlib.pyplot as plot
import multiprocessing
from multiprocessing import Pool
from multiprocessing import Process
import joblib
from joblib import Parallel, delayed
Minerals = [
'Wolframite', 'Tourmaline', 'Sphalerite','Rutile','Quartz', 'Pyrite', 'Orthose', 'Muscovite', 'Molybdenite', 'Ilmenite',
'Hematite', 'Fluorite', 'Chlorite', 'Chalcopyrite', 'Cassiterite', 'Biotite', 'Arsenopyrite', 'Apatite', 'Albite']
Elements = ['Si', 'Al', 'K', 'Ca', 'Fe', 'Mg', 'Mn', 'CaF', 'S', 'Ti',
'Sn', 'W', 'Cu', 'Zn', 'Ba', 'Rb', 'Sr', 'Li', 'As', 'Mo', 'Na', 'P']
################################### Preprocessing the Data :
"""Reading Validation Data """
# Inputs
DF_Validation = pd.read_csv("Validation.csv" )
DF_Validation = DF_Validation.iloc[0:2000]
Input_Set_Validation = DF_Validation[Elements]
INPUTS_Validation = Input_Set_Validation.to_numpy()
""" fonction to get the maximum value in a list of proability """
def get_res(list_res):
max_value = None
max_idx = None
for idx, num in enumerate(list_res):
if (max_value is None or num > max_value):
max_value = num
max_idx = idx
return [max_idx, max_value]
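# Equivalent to [int(np.argmax(list_res)), max(list_res)] (first maximum wins on ties).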
################################### Predicting with the neural network :
def ProcessInput(k):
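    # The saved graph is restored inside the worker, so each pool process gets
    # its own TensorFlow session, at the cost of reloading the checkpoint per call.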
with tf.compat.v1.Session() as sess:
new_saver = tf.compat.v1.train.import_meta_graph( os.getcwd() + os.path.sep + "Checkpoints"+ os.path.sep + "ckpt.meta" )
new_saver.restore(sess, tf.train.latest_checkpoint(os.getcwd() + os.path.sep + "Checkpoints"))
graph = tf.compat.v1.get_default_graph()
Input = graph.get_tensor_by_name("ph_input:0")
print("Evaluation on the sample number : {}".format(k))
feed_dict ={Input : INPUTS_Validation[k:k+1] }
Prediction = graph.get_tensor_by_name("Prediction:0")
r = sess.run(Prediction,feed_dict)
return r
pool = multiprocessing.Pool(processes=4)
results = pool.map(ProcessInput, range(len(DF_Validation)) )
#Saving Results
R_Minerals = []
R_Probability = []
for w in results:
idx, elmnt = get_res(list_res =w[0])
    R_Minerals.append(Minerals[idx])
R_Probability.append(elmnt)
Minls = {'Minerals': R_Minerals, 'Prob': R_Probability}
df = | pd.DataFrame(data=Minls) | pandas.DataFrame |
#!/usr/bin/python3
"""
/**
******************************************************************************
* @file dominant_attribute.py
* @author <NAME>
* $Rev: 1 $
* $Date: Sat Nov 17 15:12:04 CST 2018 $
* @brief Functions related to Dominant Attribute Algorithm
******************************************************************************
* @copyright
* @internal
*
* @endinternal
*
* @details
* This file contains the functions related to Dominant Attribute Algorithm
* @ingroup Algorithm
*/
"""
import pandas as pd
import numpy as np
from .misc import levelOfConsistency
def generateDiscretizedValue(value, point_ranges, min_value, max_value):
"""
Generate discrete symbol based on range
This function computes the discrete symbol representation of the value based
on the point ranges supplied. The representation will be lowerRange..upperRange.
Note: point_ranges must be sorted already.
Parameters
----------
value: float
The value to be transformed
point_ranges: list
List of ranges
min_value: float
The minimum value of the range
max_value: float
The maximum value of the range
Returns
-------
str
Discretize symbol representation of value
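    Examples
    --------
    Illustrative calls (cut points are assumed to be sorted):
    >>> generateDiscretizedValue(4.2, [3.0, 5.0], 1.0, 9.0)
    '3.000..5.000'
    >>> generateDiscretizedValue(0.5, [3.0, 5.0], 1.0, 9.0)
    '1.000..3.000'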
"""
str_format = "{:.3f}..{:.3f}"
# No ranges
if (len(point_ranges) == 0):
return (str_format.format(min_value, max_value))
# Value is below all point_ranges
if (value < point_ranges[0]):
return (str_format.format(min_value, point_ranges[0]))
# value is between point_ranges
for i in range(1, len(point_ranges)):
if (value < point_ranges[i]):
return(str_format.format(point_ranges[i - 1], point_ranges[i]))
# value is above all point_ranges
return (str_format.format(point_ranges[len(point_ranges) - 1], max_value))
def generateDiscretizedDataFrame(df, chosen_cut_points):
"""
Generate discretized data frame based on cut_points
This function generates discretized data frame based on chosen_cut_points.
The decision column will always be the last column.
Parameters
----------
df: pandas.DataFrame
The data frame to be discretized
chosen_cut_points: dict
Dictionary of cut points with the attribute names as the dictionary keys
Returns
-------
pandas.DataFrame
Discretize data frame
"""
decision_colname = df.columns[-1]
attribute_colnames = df.columns[:-1]
numerical_attribute_colnames = df[attribute_colnames].select_dtypes(include = "number").columns
non_numerical_attribute_colnames = attribute_colnames.drop(numerical_attribute_colnames)
df_discretized = | pd.DataFrame() | pandas.DataFrame |
import json
import datetime
import numpy as np
import pandas as pd
from pandas import json_normalize
import sqlalchemy as sq
import requests
from oanda.oanda import Account # oanda_v20_platform.
import os.path
import logging
from utils.fileops import get_abs_path
# TODO add updated to the database and have a check to update each day
class MarketData(Account):
"""Creates a sqlite database of current market information - for use by the trading strategies.
DB Browser https://sqlitebrowser.org/ can be used for easy viewing and filtering.
    Focused on daily data, it includes for every tradable instrument a table with:
The Last 60 days of data
    Yesterday's Volume, Open, High, Low, and Close
The 55 day Vol, O, H, L, C
The 20 day Vol, O, H, L, C
The 10 day Vol, O, H, L, C
True Range for each day - a volatility measure that captures gaps
N the 20 day average True Range - like ATR(20)
And a summary table of market data (called marketdata) required for trading effectively,
which includes the following information:
Trading costs such as financing rates and the days they are applied.
Pip positions (decimal points) for each instrument
Margin rates
Max and Min Trailing stop distances
Maximum order sizes
The average spread
The volatility (as N)
The spread percentage of N - enabling the selection of a trading range where trade costs are minimised
    e.g. if the spread is 20 and the stop loss (SL) and take profit (TP) are 100, your trading edge has
    to be able to overcome that ~20% cost to have any chance of succeeding - some of the instruments
    with a high spread % N are very hard (impossible) to trade profitably without a crystal ball.
    The N per 100X spread provides a quick way to get the target trading range where the spread cost will
    be ~1%, e.g. US30_USD currently has a Nper100Spread of 1.92 and an N of 380, so if TP and SL are set to
    380/1.92=198 pips you will only lose ~1% in spread cost, and with the daily range at 380 you should
    hit one of the targets in a day or so. Compare that with USD_JPY, which currently has an N of 0.60 and
    a Nper100Spread of 0.4: keeping the spread cost to ~1% requires a move of 1.5 (0.6/0.4), more like
    3-4 days before a target will be hit. This column can be sorted to get a top 10 of instruments that
    are efficient to trade.
The asset class and base currency
Args:
db_path str, default='data/marketdata.db':
The path to the database from the directory where this class is being run.
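    Example:
        An illustrative sketch only (assumes the Account credentials are set up
        and the summary table is already up to date, so that ``self.marketdata``
        has been populated):
            md = MarketData()
            cheapest = md.marketdata.sort_values('Spread % N').head(10)
            print(cheapest[['name', 'N', 'avgSpread', 'Spread % N']])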
"""
def __init__(self, db_path=get_abs_path(['oanda_v20_platform','data', 'marketdata.db']), **kwargs):
super().__init__(**kwargs)
self.logger = logging.getLogger(__name__)
# setup connection to the database
self.db_path=db_path
self.engine = sq.create_engine(f'sqlite:///{self.db_path}')
# does the db exist if not create it by connecting
if not os.path.isfile(self.db_path):
conn = self.engine.connect()
conn.execute("commit")
conn.close()
self.logger.info(f"Empty MarketData database created at: {self.db_path}")
# get todays date
self.today = datetime.datetime.now().strftime('%Y-%m-%d')
try: # do we need to update marketdata?
sql = """SELECT DISTINCT(Updated) FROM marketdata;"""
data_date= pd.read_sql_query(sql, con=self.engine)
except: # only an empty db exists - build db
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data added to the database")
# is marketdata out of date?
if data_date.loc[0].item() != self.today:
self.instruments = self.get_instruments()
self.build_db()
self.logger.info("Market data updated in the database")
else: # get the marketdata
df = pd.read_sql_query(sql="""SELECT name, type, marginRate, N, avgSpread,
"financing.longRate", "financing.shortRate",
"Spread % N"
FROM marketdata """,
con=self.engine)
self.marketdata = df[['name', 'type', 'marginRate', 'N', 'avgSpread',
'financing.longRate', 'financing.shortRate',
'Spread % N']].sort_values(by='Spread % N')
def get_core_assets(self):
        # "Base Currency" must be quoted in SQL because the column name contains a space
        self.core = pd.read_sql_query(
            sql="""SELECT DISTINCT name, "Base Currency", Asset FROM marketdata""",
            con=self.engine)
        self.core_list = self.core['name'].to_list()
def build_db(self):
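        """Build or refresh the marketdata summary table.
        For every tradable instrument this collects the average spread and the
        last 60 daily candles, derives True Range, N and the rolling highs/lows
        via make_dataframe, flattens the instrument metadata (asset-class tags
        and financing days of week) and writes the combined result to the
        'marketdata' table, replacing any existing copy.
        """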
# add data to the instruments
for i in self.instruments['instruments']:
ix = i['name']
self.logger.info(f"Collecting market data for {ix}")
# add the spread data for each instrument
i['avgSpread'] = self.avg_spread(self.spreads(ix))
# get the price data
df = self.make_dataframe(self.get_daily_candles(ix))
i['volume'] = df.iloc[0, 0]
i['open'] = df.iloc[0, 1]
i['high'] = df.iloc[0, 2]
i['low'] = df.iloc[0, 3]
i['close'] = df.iloc[0, 4]
i['True Range'] = df.iloc[0, 5]
i['N'] = df.iloc[0, 6]
i['55DayHigh'] = df.iloc[0, 7]
i['20DayHigh'] = df.iloc[0, 8]
i['10DayHigh'] = df.iloc[0, 9]
i['55DayLow'] = df.iloc[0, 10]
i['20DayLow'] = df.iloc[0, 11]
i['10DayLow'] = df.iloc[0, 12]
tags = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['tags']
for l in x:
tags.loc[n, 'Asset Class'] = l['name']
fDayWeek = pd.DataFrame()
for n, i in enumerate(self.instruments['instruments']):
x = i['financing']['financingDaysOfWeek']
for d in x:
fDayWeek.loc[n, d['dayOfWeek'] + '-financing'] = d['daysCharged']
tags = tags.merge(fDayWeek, left_index=True, right_index=True)
df = json_normalize(self.instruments['instruments'])
df.drop(['tags', 'financing.financingDaysOfWeek'], inplace=True, axis=1)
df = df.merge(tags, left_index=True, right_index=True)
df['Spread % N'] = round(((df['avgSpread'] * 10.00**df['pipLocation']) / df['N'])*100, 2)
df['Nper100spread'] = df['N'] / ((df['avgSpread'] * 10.00**df['pipLocation']) * 100)
df['Base Currency'] = df.apply(lambda x: self.base(x), axis=1)
df['Asset'] = df.apply(lambda x: self.asset(x), axis=1)
df['Updated'] = self.today
df.to_sql('marketdata', con=self.engine, if_exists='replace')
def base(self, x):
return x['name'].split('_')[1]
def asset(self, x):
return x['name'].split('_')[0]
def get_instruments(self, params=None):
"""Get instruments and there associated static data.
By default gets the core instruments stored in a csv. These core
instruments are the unique available instruments.
Returns:
json: contains data that describes the available instruments
"""
url = self.base_url + '/v3/accounts/' + self.account + '/instruments'
r = requests.get(url, headers=self.headers)
self.logger.debug(f"Get Instruments returned {r} status code")
data = r.json()
return data
def avg_spread(self, spreads_json):
"""Calculate the average spread from the json returned by spreads
Args:
spreads_json: json produced by spreads function
Returns:
float: average of the average spreads
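        Example:
            avg_spread({'avg': [[1520028000, 3.0], [1520028900, 5.0]]})
            returns 4.0 (only the 'avg' entries are used)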
"""
spreads = []
for li in spreads_json['avg']:
spreads.append(li[1])
return np.mean(spreads)
def spreads(self, instrument, period=86400):
"""Returns a json with timestamps for every 15min
with the min, max and average spread.
Args:
instrument: str, required, e.g. "EUR_USD"
period: int, time period in seconds e.g. 86400 for day
Returns:
json: { "max": [[1520028000, 6], .....],
"avg": [[1520028000, 3.01822], ......],
"min": [[1520028000, 1.7], ......]
}
"""
params = {
"instrument": instrument,
"period": period
}
url = self.base_url + '/labs/v1/spreads/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Spreads function returned {r} status code")
data = r.json()
return data
def get_daily_candles(self, instrument):
"""Request the daily candle data from the API
get 60 candles from yesterday
Args:
instrument: string describing the instrument in API
Returns:
json: candle data
"""
yesterday = (datetime.datetime.now() - pd.DateOffset(days=1)).strftime("%Y-%m-%d")
last_candle = yesterday + 'T22:00:00.000000000Z'
params = {
"to": last_candle,
"count": 60,
"granularity": "D",
# "includeFirst": True,
}
url = self.base_url + f'/v3/instruments/{instrument}/candles/'
r = requests.get(url, headers=self.headers, params=params)
self.logger.debug(f"Get daily candles returned {r} status code")
data = r.json()
return data
def make_dataframe(self, candles_data):
"""Take a json of candle data -
convert to a dataframe, calculate volatility,
max and min prices
Args:
            candles_data ([json]): takes the json returned from get_daily_candles
Returns:
sends data to sql table
pandas df: the last line of data
"""
df = json_normalize(candles_data.get('candles'))
df.rename(columns={'mid.c': 'close', 'mid.h': 'high',
'mid.l': 'low', 'mid.o': 'open'},
inplace=True)
df.set_index('time', inplace=True)
# the API returns strings these need to be converted to floats
df.volume = pd.to_numeric(df.volume)
df.close = pd.to_numeric(df.close)
df.high = pd.to_numeric(df.high)
df.low = pd.to_numeric(df.low)
df.open = pd.to_numeric(df.open)
# Calculate the recent volatility (True Range) taking account of gaps
# As in the Turtle trading methodology
# NOTE max() doesn't play well with shift so break out into columns
df['R1'] = df['high'] - df['low']
df['R2'] = df['high'] - df['close'].shift(1)
df['R3'] = df['close'].shift(1) - df['low']
# get the True Range
df['True Range'] = df[['R1', 'R2', 'R3']].max(axis=1)
# Get the Turtle N like ATR(20)
df['N'] = df['True Range'].rolling(window=20, min_periods=20).mean()
# Get the latest highs and lows
df['55DayHigh'] = df['high'].rolling(window=55, min_periods=55).max()
df['20DayHigh'] = df['high'].rolling(window=20, min_periods=20).max()
df['10DayHigh'] = df['high'].rolling(window=10, min_periods=10).max()
df['55DayLow'] = df['low'].rolling(window=55, min_periods=55).min()
df['20DayLow'] = df['low'].rolling(window=20, min_periods=20).min()
df['10DayLow'] = df['low'].rolling(window=10, min_periods=10).min()
df = df[['volume', 'open', 'high', 'low', 'close', 'True Range',
'N', '55DayHigh', '20DayHigh', '10DayHigh', '55DayLow',
'20DayLow', '10DayLow']]
# TODO sort index out to tidy up date string
df.to_sql(candles_data.get('instrument') ,con=self.engine,
if_exists='replace', index_label='time', index=True)
# return the last line - yesterdays data
df_ex = df.tail(1).reset_index(drop=True)
return df_ex.tail(1)
def json_to_dataframe(self, candles_data):
"""Take a json of candle data -
convert to a df
Args:
            candles_data ([json]): takes the json returned from get_daily_candles
        Returns:
            pandas df: the candle data as a DataFrame
"""
df = json_normalize(candles_data.get('candles'))
return df
def format_df(self, df):
df.rename(columns={'mid.c': 'close',
'mid.h': 'high',
'mid.l': 'low',
'mid.o': 'open'},
inplace=True)
df['time'] = pd.to_datetime(df['time'])
df.set_index('time', inplace=True)
# the API returns strings these need to be converted to floats
df.volume = pd.to_numeric(df.volume)
df.close = pd.to_numeric(df.close)
df.high = | pd.to_numeric(df.high) | pandas.to_numeric |
import logging
import math
import warnings
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from dask import array as da, dataframe as dd
from distributed.utils_test import ( # noqa: F401
captured_logger,
cluster,
gen_cluster,
loop,
)
from sklearn.linear_model import SGDClassifier
from dask_ml._compat import DISTRIBUTED_2_5_0
from dask_ml.datasets import make_classification
from dask_ml.model_selection import (
HyperbandSearchCV,
IncrementalSearchCV,
SuccessiveHalvingSearchCV,
)
from dask_ml.model_selection._hyperband import _get_hyperband_params
from dask_ml.utils import ConstantFunction
from dask_ml.wrappers import Incremental
pytestmark = pytest.mark.skipif(not DISTRIBUTED_2_5_0, reason="hangs")
@pytest.mark.parametrize(
"array_type, library, max_iter",
[
("dask.array", "dask-ml", 9),
("numpy", "sklearn", 9),
("numpy", "ConstantFunction", 15),
("numpy", "ConstantFunction", 20),
],
)
def test_basic(array_type, library, max_iter):
@gen_cluster(client=True)
def _test_basic(c, s, a, b):
rng = da.random.RandomState(42)
n, d = (50, 2)
# create observations we know linear models can fit
X = rng.normal(size=(n, d), chunks=n // 2)
coef_star = rng.uniform(size=d, chunks=d)
y = da.sign(X.dot(coef_star))
if array_type == "numpy":
X, y = yield c.compute((X, y))
params = {
"loss": ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
"average": [True, False],
"learning_rate": ["constant", "invscaling", "optimal"],
"eta0": np.logspace(-2, 0, num=1000),
}
model = SGDClassifier(
tol=-np.inf, penalty="elasticnet", random_state=42, eta0=0.1
)
if library == "dask-ml":
model = Incremental(model)
params = {"estimator__" + k: v for k, v in params.items()}
elif library == "ConstantFunction":
model = ConstantFunction()
params = {"value": np.linspace(0, 1, num=1000)}
search = HyperbandSearchCV(model, params, max_iter=max_iter, random_state=42)
classes = c.compute(da.unique(y))
yield search.fit(X, y, classes=classes)
if library == "dask-ml":
X, y = yield c.compute((X, y))
score = search.best_estimator_.score(X, y)
assert score == search.score(X, y)
assert 0 <= score <= 1
if library == "ConstantFunction":
assert score == search.best_score_
else:
# These are not equal because IncrementalSearchCV uses a train/test
# split and we're testing on the entire train dataset, not only the
# validation/test set.
assert abs(score - search.best_score_) < 0.1
assert type(search.best_estimator_) == type(model)
assert isinstance(search.best_params_, dict)
num_fit_models = len(set(search.cv_results_["model_id"]))
num_pf_calls = sum(
[v[-1]["partial_fit_calls"] for v in search.model_history_.values()]
)
models = {9: 17, 15: 17, 20: 17, 27: 49, 30: 49, 81: 143}
pf_calls = {9: 69, 15: 101, 20: 144, 27: 357, 30: 379, 81: 1581}
assert num_fit_models == models[max_iter]
assert num_pf_calls == pf_calls[max_iter]
best_idx = search.best_index_
if isinstance(model, ConstantFunction):
assert search.cv_results_["test_score"][best_idx] == max(
search.cv_results_["test_score"]
)
model_ids = {h["model_id"] for h in search.history_}
if math.log(max_iter, 3) % 1.0 == 0:
# log(max_iter, 3) % 1.0 == 0 is the good case when max_iter is a
# power of search.aggressiveness
# In this case, assert that more models are tried than max_iter
assert len(model_ids) > max_iter
else:
# Otherwise, give some padding "almost as many estimators are tried
# as max_iter". 3 is a fudge number chosen to be the minimum; when
# max_iter=20, len(model_ids) == 17.
assert len(model_ids) + 3 >= max_iter
assert all("bracket" in id_ for id_ in model_ids)
_test_basic()
@pytest.mark.parametrize("max_iter,aggressiveness", [(27, 3), (30, 4)])
def test_hyperband_mirrors_paper_and_metadata(max_iter, aggressiveness):
@gen_cluster(client=True)
def _test_mirrors_paper(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": np.random.rand(max_iter)}
alg = HyperbandSearchCV(
model,
params,
max_iter=max_iter,
random_state=0,
aggressiveness=aggressiveness,
)
yield alg.fit(X, y)
assert alg.metadata == alg.metadata_
assert isinstance(alg.metadata["brackets"], list)
assert set(alg.metadata.keys()) == {"n_models", "partial_fit_calls", "brackets"}
# Looping over alg.metadata["brackets"] is okay because alg.metadata
# == alg.metadata_
for bracket in alg.metadata["brackets"]:
assert set(bracket.keys()) == {
"n_models",
"partial_fit_calls",
"bracket",
"SuccessiveHalvingSearchCV params",
"decisions",
}
if aggressiveness == 3:
assert alg.best_score_ == params["value"].max()
_test_mirrors_paper()
@gen_cluster(client=True)
def test_hyperband_patience(c, s, a, b):
# Test to make sure that specifying patience=True results in less
# computation
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
max_iter = 27
alg = HyperbandSearchCV(
model, params, max_iter=max_iter, patience=True, random_state=0
)
yield alg.fit(X, y)
alg_patience = max_iter // alg.aggressiveness
actual_decisions = [b.pop("decisions") for b in alg.metadata_["brackets"]]
paper_decisions = [b.pop("decisions") for b in alg.metadata["brackets"]]
for paper_iter, actual_iter in zip(paper_decisions, actual_decisions):
trimmed_paper_iter = {k for k in paper_iter if k <= alg_patience}
# This makes sure that the algorithm is executed faithfully when
# patience=True (and the proper decision points are preserved even if
# other stop-on-plateau points are added)
assert trimmed_paper_iter.issubset(set(actual_iter))
# This makes sure models aren't trained for too long
assert all(x <= alg_patience + 1 for x in actual_iter)
assert alg.metadata_["partial_fit_calls"] <= alg.metadata["partial_fit_calls"]
assert alg.best_score_ >= 0.9
max_iter = 6
kwargs = dict(max_iter=max_iter, aggressiveness=2)
alg = HyperbandSearchCV(model, params, patience=2, **kwargs)
with pytest.warns(UserWarning, match="The goal of `patience`"):
yield alg.fit(X, y)
alg = HyperbandSearchCV(model, params, patience=2, tol=np.nan, **kwargs)
yield alg.fit(X, y)
assert pd.DataFrame(alg.history_).partial_fit_calls.max() == max_iter
alg = HyperbandSearchCV(model, params, patience=2, tol=None, **kwargs)
yield alg.fit(X, y)
assert pd.DataFrame(alg.history_).partial_fit_calls.max() == max_iter
alg = HyperbandSearchCV(model, params, patience=1, **kwargs)
with pytest.raises(ValueError, match="always detect a plateau"):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield alg.fit(X, y)
@gen_cluster(client=True)
def test_cv_results_order_preserved(c, s, a, b):
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
alg = HyperbandSearchCV(model, params, max_iter=9, random_state=42)
yield alg.fit(X, y)
info = {k: v[-1] for k, v in alg.model_history_.items()}
for _, row in pd.DataFrame(alg.cv_results_).iterrows():
model_info = info[row["model_id"]]
assert row["bracket"] == model_info["bracket"]
assert row["params"] == model_info["params"]
assert np.allclose(row["test_score"], model_info["score"])
@gen_cluster(client=True)
def test_successive_halving_params(c, s, a, b):
# Makes sure that when SHAs are fit with values from the "SuccessiveHalvingSearchCV
# params" key, the number of models/calls stays the same as in Hyperband.
# This sanity check again makes sure parameters passed correctly
# (similar to `test_params_passed`)
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
alg = HyperbandSearchCV(model, params, max_iter=27, random_state=42)
kwargs = [v["SuccessiveHalvingSearchCV params"] for v in alg.metadata["brackets"]]
SHAs = [SuccessiveHalvingSearchCV(model, params, **v) for v in kwargs]
metadata = alg.metadata["brackets"]
for k, (true_meta, SHA) in enumerate(zip(metadata, SHAs)):
yield SHA.fit(X, y)
n_models = len(SHA.model_history_)
pf_calls = [v[-1]["partial_fit_calls"] for v in SHA.model_history_.values()]
assert true_meta["n_models"] == n_models
assert true_meta["partial_fit_calls"] == sum(pf_calls)
@gen_cluster(client=True)
def test_correct_params(c, s, a, b):
# Makes sure that Hyperband has the correct parameters.
# Implemented because Hyperband wraps SHA. Again, makes sure that parameters
# are correctly passed to SHA (had a case where max_iter= flag not passed to
# SuccessiveHalvingSearchCV but it should have been)
est = ConstantFunction()
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
params = {"value": np.linspace(0, 1)}
search = HyperbandSearchCV(est, params, max_iter=9)
base = {
"estimator",
"estimator__value",
"estimator__sleep",
"parameters",
"max_iter",
"test_size",
"patience",
"tol",
"random_state",
"scoring",
"verbose",
"prefix",
}
assert set(search.get_params().keys()) == base.union({"aggressiveness"})
meta = search.metadata
SHAs_params = [
bracket["SuccessiveHalvingSearchCV params"] for bracket in meta["brackets"]
]
SHA_params = base.union(
{
"n_initial_parameters",
"n_initial_iter",
"aggressiveness",
"max_iter",
"prefix",
}
) - {"estimator__sleep", "estimator__value", "estimator", "parameters"}
assert all(set(SHA) == SHA_params for SHA in SHAs_params)
# this is testing to make sure that each SHA has the correct estimator
yield search.fit(X, y)
SHAs = search._SuccessiveHalvings_
assert all(search.estimator is SHA.estimator for SHA in SHAs.values())
assert all(search.parameters is SHA.parameters for SHA in SHAs.values())
def test_params_passed():
# This makes sure that the "SuccessiveHalvingSearchCV params" key in
# Hyperband.metadata["brackets"] is correct and that they can be instantiated.
est = ConstantFunction(value=0.4)
params = {"value": np.linspace(0, 1)}
hyperband_kwargs = {
"aggressiveness": 3.5,
"max_iter": 253,
"random_state": 42,
"scoring": False,
"test_size": 0.212,
"tol": 0,
}
hyperband_kwargs["patience"] = (hyperband_kwargs["max_iter"] // hyperband_kwargs["aggressiveness"]) + 4
hyperband = HyperbandSearchCV(est, params, **hyperband_kwargs)
for k, v in hyperband_kwargs.items():
assert getattr(hyperband, k) == v
brackets = hyperband.metadata["brackets"]
SHAs_params = [bracket["SuccessiveHalvingSearchCV params"] for bracket in brackets]
for SHA_params in SHAs_params:
for k, v in hyperband_kwargs.items():
if k == "random_state":
continue
assert SHA_params[k] == v
seeds = [SHA_params["random_state"] for SHA_params in SHAs_params]
assert len(set(seeds)) == len(seeds)
# decay_rate warnings are tested in test_incremental_warns.py
@pytest.mark.filterwarnings("ignore:decay_rate")
@gen_cluster(client=True)
def test_same_random_state_same_params(c, s, a, b):
# This makes sure parameters are sampled correctly when random state is
# specified.
# This test makes sure random state is *correctly* passed to successive
# halvings from Hyperband
seed = 0
values = scipy.stats.uniform(0, 1)
h = HyperbandSearchCV(
ConstantFunction(), {"value": values}, random_state=seed, max_iter=9
)
# Make a class for passive random sampling
passive = IncrementalSearchCV(
ConstantFunction(),
{"value": values},
random_state=seed,
max_iter=2,
n_initial_parameters=h.metadata["n_models"],
)
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
yield h.fit(X, y)
yield passive.fit(X, y)
# Check to make sure the Hyperbands found the same params
v_h = h.cv_results_["param_value"]
# Check to make sure the random passive search had *some* of the same params
v_passive = passive.cv_results_["param_value"]
# Sanity checks to make sure all unique floats
assert len(set(v_passive)) == len(v_passive)
assert len(set(v_h)) == len(v_h)
# Getting the `value`s that are the same for both searches
same = set(v_passive).intersection(set(v_h))
passive_models = h.metadata["brackets"][0]["n_models"]
assert len(same) == passive_models
def test_random_state_no_seed_different_params():
# Guarantees that specifying a random state results in the successive
# halvings having the same random state (and likewise for no random state)
# This test is required because Hyperband wraps SHAs and the random state
# needs to be passed correctly.
values = scipy.stats.uniform(0, 1)
max_iter = 9
brackets = _get_hyperband_params(max_iter)
kwargs = {"value": values}
h1 = HyperbandSearchCV(ConstantFunction(), kwargs, max_iter=max_iter)
h2 = HyperbandSearchCV(ConstantFunction(), kwargs, max_iter=max_iter)
h1._get_SHAs(brackets)
h2._get_SHAs(brackets)
assert h1._SHA_seed != h2._SHA_seed
h1 = HyperbandSearchCV(ConstantFunction(), kwargs, max_iter=9, random_state=0)
h2 = HyperbandSearchCV(ConstantFunction(), kwargs, max_iter=9, random_state=0)
h1._get_SHAs(brackets)
h2._get_SHAs(brackets)
assert h1._SHA_seed == h2._SHA_seed
@gen_cluster(client=True)
def test_min_max_iter(c, s, a, b):
# This test makes sure Hyperband works with max_iter=1.
# Tests for max_iter < 1 are in test_incremental.py.
values = scipy.stats.uniform(0, 1)
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
max_iter = 1
h = HyperbandSearchCV(ConstantFunction(), {"value": values}, max_iter=max_iter)
yield h.fit(X, y)
assert h.best_score_ > 0
@gen_cluster(client=True)
def test_history(c, s, a, b):
# This test is required to make sure Hyperband wraps SHA successfully
# Mostly, it's a test to make sure the history is ordered by time
#
# There's also a test in test_incremental to make sure the history has
# correct values/etc
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
alg = HyperbandSearchCV(model, params, max_iter=9, random_state=42)
yield alg.fit(X, y)
assert alg.cv_results_["model_id"].dtype == "<U11"
assert all(isinstance(v, str) for v in alg.cv_results_["model_id"])
assert all("bracket=" in h["model_id"] for h in alg.history_)
assert all("bracket" in h for h in alg.history_)
# Hyperband does a custom ordering of times
times = [v["elapsed_wall_time"] for v in alg.history_]
assert (np.diff(times) >= 0).all()
# Make sure results are ordered by partial fit calls for each model
for model_hist in alg.model_history_.values():
calls = [h["partial_fit_calls"] for h in model_hist]
assert (np.diff(calls) >= 1).all() or len(calls) == 1
@gen_cluster(client=True)
def test_logs_dont_repeat(c, s, a, b):
# This test is necessary to make sure the dask_ml.model_selection logger
# isn't piped to stdout repeatedly.
#
# I developed this test to protect against this case:
# getLogger("dask_ml.model_selection") is piped to stdout whenever a
# bracket of Hyperband starts/each time SHA._fit is called
X, y = make_classification(n_samples=10, n_features=4, chunks=10)
model = ConstantFunction()
params = {"value": scipy.stats.uniform(0, 1)}
search = HyperbandSearchCV(model, params, max_iter=9, random_state=42)
with captured_logger(logging.getLogger("dask_ml.model_selection")) as logs:
yield search.fit(X, y)
assert search.best_score_ > 0 # ensure search ran
messages = logs.getvalue().splitlines()
model_creation_msgs = [m for m in messages if "creating" in m]
n_models = [m.split(" ")[-2] for m in model_creation_msgs]
bracket_models = [b["n_models"] for b in search.metadata["brackets"]]
assert len(bracket_models) == len(set(bracket_models))
# Make sure only one model creation message is printed per bracket
# (all brackets have unique n_models as asserted above)
assert len(n_models) == len(set(n_models))
@gen_cluster(client=True)
async def test_dataframe_inputs(c, s, a, b):
X = | pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}) | pandas.DataFrame |
import pandas as pd
from bokeh.embed import components
from bokeh.models import HoverTool
from bokeh.models.formatters import DatetimeTickFormatter
from bokeh.palettes import Plasma256
from bokeh.plotting import figure, ColumnDataSource
from app import db
from app.decorators import data_quality
from app.main.data_quality_plots import data_quality_date_plot
@data_quality(name='wavelength', caption=' ')
def hbdet_bias_plot(start_date, end_date):
"""Return a <div> element with a weather downtime plot.
The plot shows the downtime for the period between start_date (inclusive) and end_date (exclusive).
Params:
-------
start_date: date
Earliest date to include in the plot.
end_date: date
Earliest date not to include in the plot.
Return:
-------
str:
A <div> element with the throughput plot.
"""
data_list = []
date_list = []
def brake_by_date(data):
_date = data["Date"].strftime('%Y-%m-%d')
if _date not in data_list:
date_dict = {'Name': [], 'Throughput': [], 'Center': [], 'Wavelength': [], "HMFW": []}
data_list.append(_date)
date_list.append(date_dict)
ind = data_list.index(_date)
date_list[ind]['Name'].append(data['Name'])
date_list[ind]['Throughput'].append(data['Throughput'])
date_list[ind]['Center'].append(data['Center'])
date_list[ind]['Wavelength'].append(str('%.1f' % data['Center']))
date_list[ind]['HMFW'].append(str('%.1f' %data['HMFW']))
column = 'SalticamThroughputMeasurement'
# limit the query to the requested date range (the WHERE clause below is an assumption
# based on the docstring and the format placeholders being passed)
sql = 'select Date, SalticamFilter_Name as Name, DescriptiveName as Barcode, ' \
' SalticamThroughputMeasurement as Throughput' \
' from SalticamThroughputMeasurement ' \
' join Throughput using (Throughput_Id) ' \
' join NightInfo using (NightInfo_Id) ' \
' join SalticamFilter using(SalticamFilter_Id) ' \
" where Date >= '{start_date}' and Date < '{end_date}' " \
.format(start_date=start_date, end_date=end_date, column=column)
df = pd.read_sql(sql, db.engine)
file_df = pd.read_csv('scam_data.txt', delimiter="\t")
file_df.columns = ['Barcode', 'Center', 'HMFW']
df['Barcode_lower'] = df['Barcode'].str.lower()
df['Barcode_lower'] = df['Barcode_lower'].str.replace(' ', '')
file_df['Barcode_low'] = file_df['Barcode'].str.lower()
file_df['Barcode_low'] = file_df['Barcode_low'].str.replace(' ', '')
file_df['Barcode_low'] = file_df['Barcode_low'].str.replace('_', '')
file_df['Center'] = file_df['Center']/10000
results = | pd.merge(df, file_df, left_on='Barcode_lower', right_on="Barcode_low", how='left') | pandas.merge |
import os
import pkg_resources
import rasterio
import simplejson
import numpy as np
import pandas as pd
def get_population_from_raster(raster_file, indices_list) -> float:
"""Get the population sum of all valid grid cells within a state.
:param raster_file: Full path with file name and extension to the input population raster file
:type raster_file: str
:param indices_list: List of index values for grid cells that are within the target state
:type indices_list: ndarray
:return: population sum in number of humans for the target state
"""
with rasterio.open(raster_file) as src:
return src.read(1).flatten()[indices_list].sum()
def get_state_list():
"""Get a list of states from the input directory.
:return: Array of all states
"""
states_df = pd.read_csv(pkg_resources.resource_filename('population_gravity', f'data/neighboring_states_150km.csv'))
return states_df['target_state'].unique()
def convert_1d_array_to_csv(coordinate_file, indices_file, run_1d_array_file, output_dir, output_type='valid',
nodata=-3.4028235e+38):
"""Convert a 1D array of grid cells values that fall within the target state to
a CSV file containing the following fields: [XCoord, YCoord, FID, n] where "XCoord" is
the X coordinate value, "YCoord" is the Y coordinate value, "FID" is the index
of the grid cell when flattened to a 1D array, and "n" is the population output from
the run.
The projected coordinate system is "EPSG:102003 - USA_Contiguous_Albers_Equal_Area_Conic"
:param coordinate_file: Full path with file name and extension to the input coordinate CSV
file containing all grid cell coordinates and their index ID for
each grid cell in the sample space. File name is generally:
<state_name>_coordinates.csv
:type coordinate_file: str
:param indices_file: Full path with file name and extension to the input indices file
containing a list of index values that represent grid cells that fall
inside the state boundary. These index values are used to extract grid
cell values from rasters that have been read to a 2D array and then
flattened to 1D; where they still contain out-of-bounds-data. File name
is generally: <state_name>_within_indices.txt
:type indices_file: str
:param run_1d_array_file: Full path with file name and extension to a 1D array generated from
a run output. This array contains only grid cell values that fall
within the boundary of the target state. File name is generally:
<state_name>_1km_<scenario>_<setting>_<year>_1d.npy; where `scenario` is
the SSP and `setting` in either "rural", "urban", or "total"
:type run_1d_array_file: str
:param output_dir: Full path to the directory you wish to save the file in.
:type output_dir: str
:param output_type: Either "valid" or "full". Use "valid" to only export the grid cells
that are within the target state. Use "full" to export all grid cells
for the full extent. Default: "valid"
:type output_type: str
:param nodata: Value for NoData in the raster. Default: -3.4028234663852886e+38
:type nodata: float
:return: Data Frame of the combined data that has been written to a CSV file
"""
# validate output type
output_type = output_type.lower()
if output_type not in ('valid', 'full'):
raise ValueError(f"`output_type` must be either 'valid' or 'full'. You entered: '{output_type}'")
# read in coordinate file
df_coords = pd.read_csv(coordinate_file)
# read in within_indices file
with open(indices_file, 'r') as rn:
indices_list = simplejson.load(rn)
# load run 1D array
arr = np.load(run_1d_array_file)
# create a data frame of the run data
df_data = | pd.DataFrame({'FID': indices_list, 'n': arr}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import geomath as gm
import kalman
import math
def load_day(day):
header = ['timestamp', 'line_id', 'direction', 'jrny_patt_id', 'time_frame', 'journey_id', 'operator',
'congestion', 'lon', 'lat', 'delay', 'block_id', 'vehicle_id', 'stop_id', 'at_stop']
types = {'timestamp': np.int64,
'journey_id': np.int32,
'congestion': np.int8,
'lon': np.float64,
'lat': np.float64,
'delay': np.int8,
'vehicle_id': np.int32,
'at_stop': np.int8}
file_name = 'data/siri.201301{0:02d}.csv'.format(day)
df = pd.read_csv(file_name, header=None, names=header, dtype=types, parse_dates=['time_frame'],
infer_datetime_format=True)
null_replacements = {'line_id': 0, 'stop_id': 0}
df = df.fillna(value=null_replacements)
df['line_id'] = df['line_id'].astype(np.int32)
df['stop_id'] = df['stop_id'].astype(np.int32)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='us')
return df
def calculate_durations(data_frame, vehicle_id):
one_second = np.timedelta64(1000000000, 'ns')
dv = data_frame[data_frame['vehicle_id'] == vehicle_id]
ts = dv.timestamp.values
dtd = ts[1:] - ts[:-1]
dt = np.zeros(len(dtd) + 1)
dt[1:] = dtd / one_second
return dt
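# Tiny sketch of the duration computation above: dividing a timedelta64 array by a
# one-second timedelta64 yields the gap between consecutive GPS fixes in float seconds.
import numpy as np
ts_demo = np.array(['2013-01-01T00:00:00', '2013-01-01T00:00:20', '2013-01-01T00:00:50'],
                   dtype='datetime64[ns]')
print((ts_demo[1:] - ts_demo[:-1]) / np.timedelta64(1000000000, 'ns'))  # [20. 30.]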
def calculate_distances(data_frame, vehicle_id):
dv = data_frame[data_frame['vehicle_id'] == vehicle_id]
lat = dv.lat.values
lon = dv.lon.values
dxm = gm.haversine_np(lon[1:], lat[1:], lon[:-1], lat[:-1])
dx = np.zeros(len(dxm) + 1)
dx[1:] = dxm
return dx
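# Hedged sketch of a vectorised haversine, roughly what geomath.haversine_np is assumed
# to provide here (great-circle distance on a spherical Earth; this version returns metres).
import numpy as np
def haversine_np_sketch(lon1, lat1, lon2, lat2, radius_m=6371000.0):
    lon1, lat1, lon2, lat2 = map(np.radians, (lon1, lat1, lon2, lat2))
    a = np.sin((lat2 - lat1) / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2.0) ** 2
    return 2.0 * radius_m * np.arcsin(np.sqrt(a))
print(haversine_np_sketch(np.array([-6.26]), np.array([53.34]),
                          np.array([-6.26]), np.array([53.35])))  # ~1112 m for 0.01 deg of latitude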
def read_row(t, i):
r = np.zeros((4, 1), dtype=np.float)
r[0, 0] = t[i, 1]
r[1, 0] = t[i, 2]
return r
def read_observation(t, i):
r = np.zeros((2, 1), dtype=np.float)
r[0, 0] = t[i, 1]
r[1, 0] = t[i, 2]
return r
def convert_speed(deg_per_sec_x, deg_per_sec_y, lat, lon):
"""
Converts a speed in degrees per second decomposed in longitude and latitude components
into an absolute value measured in meters per second.
:param deg_per_sec_x: Speed along the longitude (x) axis
:param deg_per_sec_y: Speed along the latitude (y) axis
:param lat: Latitude of the location where the original speed is measured
:param lon: Longitude of the location where the original speed is measured
:return: Absolute value of the speed in meters per second.
"""
ms_x = gm.delta_degree_to_meters(lat, lon, delta_lon=deg_per_sec_x)
ms_y = gm.delta_degree_to_meters(lat, lon, delta_lat=deg_per_sec_y)
ms = math.sqrt(ms_x * ms_x + ms_y * ms_y)
return ms
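# Hedged, self-contained sketch of the conversion idea: near latitude `lat`, one degree of
# latitude is ~111,320 m and one degree of longitude is ~111,320*cos(lat) m. This is a
# small-angle approximation of what geomath.delta_degree_to_meters presumably computes.
import math
def convert_speed_sketch(deg_per_sec_x, deg_per_sec_y, lat):
    meters_per_degree = 111320.0
    ms_x = deg_per_sec_x * meters_per_degree * math.cos(math.radians(lat))
    ms_y = deg_per_sec_y * meters_per_degree
    return math.hypot(ms_x, ms_y)
print(convert_speed_sketch(1e-4, 0.0, 53.3))  # ~6.7 m/s purely eastward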
def filter_trajectory(trajectory):
prev_x = read_row(trajectory, 0)
lat = prev_x[1, 0]
lon = prev_x[0, 0]
# Calculate the location standard deviations in degrees
sigma_x = gm.x_meters_to_degrees(10.0, lat, lon)
sigma_y = gm.y_meters_to_degrees(10.0, lat, lon)
sigma = np.array([sigma_x, sigma_y])
# Calculate the speed standard deviations in degrees per second
sigma_sx = gm.x_meters_to_degrees(10.0, lat, lon)
sigma_sy = gm.y_meters_to_degrees(10.0, lat, lon)
sigma_speed = np.array([sigma_sx, sigma_sy])
c = np.zeros((2, 4), dtype=np.float)
c[0, 0] = 1.0
c[1, 1] = 1.0
prev_p = kalman.calculate_p(sigma, sigma_speed)
# Set-up the result array
result = np.zeros((trajectory.shape[0], 5), dtype=np.float64)
result[0, 0:4] = np.transpose(prev_x)
result[0, 2:4] = result[0, 0:2]
for i in range(1, trajectory.shape[0]):
y = read_observation(trajectory, i)
phi = kalman.calculate_phi(trajectory[i, 0])
next_x, next_p = kalman.predict_step(prev_x, prev_p, phi, sigma_speed)
updated_x, updated_p = kalman.update_step(next_x, next_p, c, y, sigma)
speed_x = updated_x[2, 0] # Longitude speed in degrees per second
speed_y = updated_x[3, 0] # Latitude speed in degrees per second
px = y[0, 0] # Longitude
py = y[1, 0] # Latitude
ms = convert_speed(speed_x, speed_y, py, px) * 3.6 # Estimated speed in km/h
# Pack the row data
result[i, 0:2] = np.transpose(y)
result[i, 2:4] = np.transpose(updated_x[0:2, 0])
result[i, 4] = ms
# print("{0}, {1}, {2}, {3}, {4}".format(*result[i, :]))
prev_x, prev_p = updated_x, updated_p
return result
def result_to_dataframe(result, data_frame, vehicle):
out_columns = ['lon', 'lat', 'flt_lon', 'flt_lat', 'speed_flt']
df = data_frame.loc[data_frame['vehicle_id'] == vehicle, ['timestamp', 'stop_id', 'at_stop', 'delay', 'speed']]
flt = | pd.DataFrame(data=result, columns=out_columns, index=df.index.values) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
This module contains all the remote tests. The data for these
tests is requested from the ESA NEOCC portal.
* Project: NEOCC portal Python interface
* Property: European Space Agency (ESA)
* Developed by: Elecnor Deimos
* Author: <NAME>
* Date: 02-11-2021
© Copyright [European Space Agency][2021]
All rights reserved
"""
import io
import os
import re
import random
import pytest
import pandas as pd
import pandas.testing as pdtesting
import pandas.api.types as ptypes
import requests
from astroquery.esa.neocc.__init__ import conf
from astroquery.esa.neocc import neocc, lists, tabs
import astropy
# Import BASE URL and TIMEOUT
API_URL = conf.API_URL
DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
TIMEOUT = conf.TIMEOUT
VERIFICATION = conf.SSL_CERT_VERIFICATION
@pytest.mark.remote_data
class TestLists:
"""Class which contains the unitary tests for lists module.
"""
# Dictionary for lists
lists_dict = {
"nea_list": 'allneo.lst',
"updated_nea": 'updated_nea.lst',
"monthly_update": 'monthly_update.done',
"risk_list": 'esa_risk_list',
"risk_list_special": 'esa_special_risk_list',
"close_approaches_upcoming": 'esa_upcoming_close_app',
"close_approaches_recent": 'esa_recent_close_app',
"priority_list": 'esa_priority_neo_list',
"priority_list_faint": 'esa_faint_neo_list',
"close_encounter" : 'close_encounter2.txt',
"impacted_objects" : 'impactedObjectsList.txt',
"neo_catalogue_current" : 'neo_kc.cat',
"neo_catalogue_middle" : 'neo_km.cat'
}
def test_get_list_url(self):
"""Test for checking the URL termination for requested lists.
Check that an invalid list name raises KeyError.
"""
# Valid inputs
valid_names = ["nea_list", "updated_nea", "monthly_update",
"risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent",
"priority_list", "priority_list_faint",
"close_encounter", "impacted_objects"]
# Invalid inputs
bad_names = ["ASedfe", "%&$", "ÁftR+", 154]
# Assert for valid names
for element in valid_names:
assert lists.get_list_url(element) == \
self.lists_dict[element]
# Assert for invalid names
for elements in bad_names:
with pytest.raises(KeyError):
lists.get_list_url(elements)
def test_get_list_data(self):
"""Check data obtained is pandas.DataFrame or pandas.Series
"""
# Check pd.Series output
list_series = ["nea_list", "updated_nea", "monthly_update"]
for series in list_series:
assert isinstance(lists.get_list_data(self.\
lists_dict[series], series), pd.Series)
# Check pd.DataFrame output
list_dfs = ["risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent", "priority_list",
"close_encounter", "priority_list_faint",
"impacted_objects"]
for dfs in list_dfs:
assert isinstance(lists.get_list_data(self.\
lists_dict[dfs], dfs), pd.DataFrame)
def test_parse_list(self):
"""Check data obtained is pandas.DataFrame or pandas.Series
"""
# Check pd.Series output
url_series = ["nea_list", "updated_nea", "monthly_update"]
for url in url_series:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
assert isinstance(lists.parse_list(url, data_list_d),
pd.Series)
# Check pd.DataFrame output
url_dfs = ["risk_list", "risk_list_special",
"close_approaches_upcoming",
"close_approaches_recent", "priority_list",
"close_encounter", "priority_list_faint",
"impacted_objects"]
for url in url_dfs:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
assert isinstance(lists.parse_list(url, data_list_d),
pd.DataFrame)
# Invalid inputs
bad_names = ["ASedfe", "%&$", "ÁftR+", 154]
# Assert for invalid names
for elements in bad_names:
with pytest.raises(KeyError):
lists.parse_list(elements, data_list_d)
def test_parse_nea(self):
"""Check data: nea list, updated nea list and monthly update
"""
url_series = ["nea_list", "updated_nea", "monthly_update"]
for url in url_series:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_nea(data_list_d)
# Assert is a pandas Series
assert isinstance(new_list, pd.Series)
# Assert is not empty
assert not new_list.empty
# List of all NEAs
if url == "nea_list":
filename = os.path.join(DATA_DIR, self.lists_dict[url])
content = open(filename, 'r')
nea_list = pd.read_csv(content, header=None)
# Remove whitespaces
nea_list = nea_list[0].str.strip().replace(r'\s+', ' ',
regex=True)\
.str.replace('# ', '')
# Check size of the data frame
assert len(new_list.index) > 20000
# Check 74 first elements are equal from reference
# data (since provisional designator may change)
pdtesting.assert_series_equal(new_list[0:74], nea_list[0:74])
else:
# Check date format DDD MMM DD HH:MM:SS UTC YYYY
assert re.match(r'\w{3} \w{3} \d{2} \d{2}:\d{2}:\d{2} '
r'\w{3} \d{4}', new_list.iloc[0])
def test_parse_risk(self):
"""Check data: risk_list, risk_list_special
"""
url_risks = ['risk_list', 'risk_list_special']
# Columns of risk lists
risk_columns = ['Object Name', 'Diameter in m', '*=Y',
'Date/Time', 'IP max', 'PS max', 'TS',
'Vel in km/s', 'First year', 'Last year',
'IP cum', 'PS cum']
risk_special_columns = risk_columns[0:8]
for url in url_risks:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_risk(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
if url == 'risk_list':
# Assert dataframe is not empty, columns names, length
assert not new_list.empty
assert (new_list.columns == risk_columns).all()
assert len(new_list.index) > 1000
# Assert columns data types
# Floats
float_cols = ['Diameter in m', 'IP max', 'PS max',
'Vel in km/s', 'IP cum', 'PS cum']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# int64
int_cols = ['TS', 'First year', 'Last year']
assert all(ptypes.is_int64_dtype(new_list[cols2])\
for cols2 in int_cols)
# Object
object_cols = ['Object Name', '*=Y']
assert all(ptypes.is_object_dtype(new_list[cols3])\
for cols3 in object_cols)
# Datetime
assert ptypes.is_datetime64_ns_dtype(
new_list['Date/Time'])
else:
# Currently risk special list is empty
assert new_list.empty
assert (new_list.columns == risk_special_columns).all()
def test_parse_clo(self):
"""Check data: close_approaches_upcoming,
close_approaches_recent
"""
url_close = ['close_approaches_upcoming',
'close_approaches_recent']
# Columns of close approaches lists
close_columns = ['Object Name', 'Date',
'Miss Distance in km', 'Miss Distance in au',
'Miss Distance in LD', 'Diameter in m',
'*=Yes', 'H', 'Max Bright',
'Rel. vel in km/s']
for url in url_close:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_clo(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty, columns names and length
assert not new_list.empty
assert (new_list.columns == close_columns).all()
assert len(new_list.index) > 100
# Assert Connection Error. In case of internal server error
# the request provided an empty file
foo_error = io.StringIO('This site cant be reached\n'
'domain.com regused to connect\n'
'Search Google for domain\n'
'ERR_CONNECTION_REFUSED')
with pytest.raises(ConnectionError):
lists.parse_clo(foo_error)
# Assert columns data types
# Floats
float_cols = ['Miss Distance in au',
'Miss Distance in LD', 'Diameter in m', 'H',
'Max Bright', 'Rel. vel in km/s']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# int64
assert ptypes.is_int64_dtype(new_list['Miss Distance in km'])
# Object
object_cols = ['Object Name', '*=Yes']
assert all(ptypes.is_object_dtype(new_list[cols3])\
for cols3 in object_cols)
# Datetime
assert ptypes.is_datetime64_ns_dtype(new_list['Date'])
def test_parse_pri(self):
"""Check data: priority_list, priority_list_faint
"""
url_priority = ['priority_list', 'priority_list_faint']
# Columns of close approaches lists
priority_columns = ['Priority', 'Object',
'R.A. in arcsec', 'Decl. in deg',
'Elong. in deg', 'V in mag', 'Sky uncert.',
'End of Visibility']
for url in url_priority:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_pri(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty, columns names and length
assert not new_list.empty
assert (new_list.columns == priority_columns).all()
assert len(new_list.index) > 100
# Assert columns data types
# Floats
float_cols = ['R.A. in arcsec', 'Decl. in deg',
'V in mag']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# int64
int_cols = ['Priority', 'Elong. in deg', 'Sky uncert.']
assert all(ptypes.is_int64_dtype(new_list[cols2])\
for cols2 in int_cols)
# Object
assert ptypes.is_object_dtype(new_list['Object'])
# Datetime
assert ptypes.is_datetime64_ns_dtype(
new_list['End of Visibility'])
def test_parse_encounter(self):
"""Check data: close_encounter
"""
url = 'close_encounter'
# Columns of close approaches lists
encounter_columns = ['Name/desig', 'Planet', 'Date',
'Time approach', 'Time uncert',
'Distance', 'Minimum distance',
'Distance uncertainty', 'Width',
'Stretch', 'Probability', 'Velocity',
'Max Mag']
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_encounter(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty, columns names and length
assert not new_list.empty
assert (new_list.columns == encounter_columns).all()
assert len(new_list.index) > 100000
# Assert columns data types
# Floats
float_cols = encounter_columns[3:]
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# Object
object_cols = ['Name/desig', 'Planet']
assert all(ptypes.is_object_dtype(new_list[cols2])\
for cols2 in object_cols)
# Datetime
assert ptypes.is_datetime64_ns_dtype(new_list['Date'])
# Assert Connection Error. In case of internal server error
# the request provided an empty file
foo_error = io.StringIO('This site cant be reached\n'
'domain.com regused to connect\n'
'Search Google for domain\n'
'ERR_CONNECTION_REFUSED')
with pytest.raises(ConnectionError):
lists.parse_encounter(foo_error)
def test_parse_impacted(self):
"""Check data: impacted_objects
"""
url = 'impacted_objects'
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_impacted(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty and length
assert not new_list.empty
assert len(new_list.index) < 100
# Assert columns data types
# Object
assert ptypes.is_object_dtype(new_list[0])
# Datetime
assert ptypes.is_datetime64_ns_dtype(new_list[1])
def test_parse_neo_catalogue(self):
"""Check data: close_encounter
"""
url_cat = ['neo_catalogue_current',
'neo_catalogue_middle']
# Columns of close approaches lists
cat_columns = ['Name', 'Epoch (MJD)', 'a', 'e', 'i',
'long. node', 'arg. peric.', 'mean anomaly',
'absolute magnitude', 'slope param.',
'non-grav param.']
for url in url_cat:
# Get data from URL
data_list = requests.get(API_URL + self.lists_dict[url],
timeout=TIMEOUT,
verify=VERIFICATION).content
# Decode the data using UTF-8
data_list_d = io.StringIO(data_list.decode('utf-8'))
# Parse using parse_nea
new_list = lists.parse_neo_catalogue(data_list_d)
# Assert is a pandas DataFrame
assert isinstance(new_list, pd.DataFrame)
# Assert dataframe is not empty, columns names and length
assert not new_list.empty
assert (new_list.columns == cat_columns).all()
assert len(new_list.index) > 10000
# Assert columns data types
# Floats
float_cols = ['Epoch (MJD)', 'a', 'e', 'i',
'long. node', 'arg. peric.', 'mean anomaly',
'absolute magnitude', 'slope param.']
assert all(ptypes.is_float_dtype(new_list[cols1])\
for cols1 in float_cols)
# Object
assert ptypes.is_object_dtype(new_list['Name'])
# Int
assert | ptypes.is_int64_dtype(new_list['non-grav param.']) | pandas.api.types.is_int64_dtype |
import numpy as np
import scipy.io as sio
import datetime as dt
import io
import requests
import pandas as pd
def _get_timestamp(line):
""" Get a datetime and epoch value from a standard DAT file line """
timestamp = dt.datetime.strptime(' '.join(line.strip().split(' ')[1:3]), '%Y/%m/%d %H:%M:%S.%f')
epoch = np.float64(timestamp.replace(tzinfo=dt.timezone.utc).timestamp()) # 'epoch' is unix time
return timestamp, epoch
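# Hedged example of the line layout _get_timestamp expects: fields 1 and 2 after the record
# tag hold the date and time. The sample line below is hypothetical, not real instrument output.
sample_line = "RAW 2014/07/21 12:34:56.789 P2=123456.789,12345.678"
ts_demo, epoch_demo = _get_timestamp(sample_line)
print(ts_demo, epoch_demo)  # 2014-07-21 12:34:56.789000 and the matching unix epoch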
def _get_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def open_gdrive_file(blob_id):
""" Read a file from Google Drive into memory. Returns an open (BytesIO) file-like object. """
url = "https://docs.google.com/uc?export=download"
session = requests.Session()
response = session.get(url, params = { 'id' : blob_id }, stream = True,)
token = _get_token(response)
if token:
params = { 'id' : blob_id, 'confirm' : token }
response = session.get(url, params = params, stream = True)
file_bytes = response.content
return io.BytesIO(file_bytes)
def read_nav(blob_id, dive_number):
f = open_gdrive_file(blob_id)
mat = sio.loadmat(f, squeeze_me=True)
nrows = len(mat['rnv']['t'].take(0))
timestamp = []
for i in range(nrows):
timestamp.append(dt.datetime.utcfromtimestamp(mat['rnv']['t'].take(0)[i]))
epoch = list(mat['rnv']['t'].take(0))
dive_number = [dive_number] * nrows
lat = list(mat['rnv']['lat'].take(0))
lon = list(mat['rnv']['lon'].take(0))
depth = list(mat['rnv']['pos'].take(0)[:,2])
height = list(mat['rnv']['alt'].take(0))
heading = list(mat['rnv']['pos'].take(0)[:,3])
pitch = list(mat['rnv']['pos'].take(0)[:,4])
roll = list(mat['rnv']['pos'].take(0)[:,5])
# convert to dataframe
return pd.DataFrame({'timestamp': timestamp, 'epoch': epoch, 'dive_number': dive_number,
'lat': lat, 'lon': lon, 'depth': depth, 'heading': heading,
'pitch': pitch, 'roll': roll, 'height': height})
def read_paros(blob_id, dive_number):
paros_list = []
f = open_gdrive_file(blob_id)
for line in f:
line = line.decode('utf-8').strip()
if 'RAW' in line[0:3]:
if 'P2=' in line:
if len(line) == 57: # good lines from this instrument are length 57
timestamp, epoch = _get_timestamp(line)
a = line.split(' ')[3].split(',')[0].split('=')[1]
b = line.split(' ')[3].split(',')[1]
paros_list.append([timestamp, epoch, dive_number, np.float64(a), np.float64(b)])
# convert to dataframe (tau and eta are the pressure and temperature signal periods in microseconds)
return pd.DataFrame(paros_list, columns=['timestamp', 'epoch', 'dive_number', 'tau', 'eta'])
def read_ustrain(blob_id, dive_number):
ustrain_list = []
f = open_gdrive_file(blob_id)
for line in f:
line = line.decode('utf-8').strip()
if 'MSA3' in line[0:4]:
if len(line.split(' ')) == 32: # good lines from this instrument will have 32 fields
timestamp, epoch = _get_timestamp(line)
ustrain_list.append([timestamp, epoch, dive_number] + list(map(np.float64, line.split(' ')[3:-1])))
# convert to dataframe
return pd.DataFrame(ustrain_list, columns=['timestamp','epoch','dive_number','a',
'b','c','d','e','f','g','h','i','j','k',
'l','m','n','o','p','q','r','s','t','u',
'v','w','x','y','z','aa','bb'])
def read_sbe3(blob_id, dive_number):
sbe3_list = []
f = open_gdrive_file(blob_id)
for line in f:
try:
line = line.decode('utf-8').strip()
except:
line = ''
if 'SBE3' in line[0:4]:
if len(line) == 58: # good lines from this instrument are length 58
timestamp, epoch = _get_timestamp(line)
counts_0 = np.int64(line.split(' ')[4])
counts_1 = np.int64(line.split(' ')[6])
if counts_0 > 500000 and counts_0 < 815000 and counts_1 > 450000 and counts_1 < 770000:
sbe3_list.append([timestamp, epoch, dive_number, counts_0, counts_1])
# convert to dataframe
return | pd.DataFrame(sbe3_list, columns=['timestamp','epoch','dive_number','counts_0','counts_1']) | pandas.DataFrame |
""" Implementation of ``SDAFile`` for working with SDA files.
The SDA format was designed to be universal to facilitate data sharing across
multiple languages. It supports reading and updating all record types, except
*function* records. It supports writing *numeric*, *logical*, *cell*, and
*structure* records.
"""
from contextlib import contextmanager
from functools import partial
import os
import os.path as op
import re
import shutil
import tempfile
import h5py
import numpy as np
from .extract import extract
from .record_inserter import InserterRegistry
from .utils import (
are_signatures_equivalent, error_if_bad_header, error_if_not_writable,
get_decoded, is_valid_writable, set_encoded, unnest, unnest_record,
update_header, validate_structures, write_header,
)
WRITE_MODES = ('w', 'w-', 'x', 'a')
class SDAFile(object):
""" Read, write, inspect, and manipulate Sandia Data Archive files.
This supports version 1.1 of the Sandia Data Archive format.
"""
def __init__(self, name, mode='a', **kw):
""" Open an SDA file for reading, writing, or interrogation.
Parameters
----------
name : str
The name of the file to be loaded or created.
mode : str
r Read-only, file must exist
r+ Read/write, file must exist
w Create file, truncate if exists
w- or x Create file, fail if exists
a Read/write if exists, create otherwise (default)
kw :
Key-word arguments that are passed to the underlying HDF5 file. See
h5py.File for options.
"""
file_exists = op.isfile(name)
self._mode = mode
self._filename = name
self._kw = kw
self._registry = InserterRegistry()
# Check existence
if mode in ('r', 'r+') and not file_exists:
msg = "File '{}' does not exist".format(name)
raise IOError(msg)
# Check the header when mode requires the file to exist
if mode in ('r', 'r+') or (file_exists and mode == 'a'):
with self._h5file('r') as h5file:
error_if_bad_header(h5file)
# Check that file is writable when mode will write to file.
if mode != 'r':
with self._h5file('a') as h5file:
error_if_not_writable(h5file)
# Create the header if this is a new file
if mode in ('w', 'w-', 'x') or (not file_exists and mode == 'a'):
with self._h5file(mode) as h5file:
write_header(h5file.attrs)
# File properties
@property
def name(self):
""" File name on disk. """
return self._filename
@property
def mode(self):
""" Mode used to open file. """
return self._mode
# Format attrs
@property
def FileFormat(self):
""" The 'FileFormat' file attribute. """
return self._get_attr('FileFormat')
@property
def FormatVersion(self):
""" The format version from the SDA file. """
return self._get_attr('FormatVersion')
@property
def Writable(self):
""" The 'Writable' flag from the SDA file. """
return self._get_attr('Writable')
@Writable.setter
def Writable(self, value):
if self._mode not in WRITE_MODES:
raise ValueError("File is not writable.")
if not is_valid_writable(value):
raise ValueError("Must be 'yes' or 'no'")
with self._h5file('r+') as h5file:
set_encoded(h5file.attrs, Writable=value)
@property
def Created(self):
""" The time the file was created. """
return self._get_attr('Created')
@property
def Updated(self):
""" The time the file was last updated. """
return self._get_attr('Updated')
# Public
def describe(self, label, description=''):
""" Change the description of a data entry.
Parameters
----------
label : str
The data label.
description : str
A description to accompany the data
Raises
------
ValueError if the label contains invalid characters
ValueError if the label does not exist
"""
self._validate_can_write()
self._validate_labels(label, must_exist=True)
with self._h5file('r+') as h5file:
set_encoded(h5file[label].attrs, Description=description)
update_header(h5file.attrs)
def extract(self, label):
""" Extract data from an SDA file.
Parameters
----------
label : str
The data label.
Returns
-------
data : object
Archive data associated with the label.
Notes
-----
Sparse numeric data is extracted as
:class:`coo_matrix<scipy:scipy.sparse.coo_matrix>`. This format does
not support all numpy operations.
Raises
------
ValueError if the label contains invalid characters
ValueError if the label does not exist
"""
self._validate_labels(label, must_exist=True)
with self._h5file('r') as h5file:
return extract(h5file, label)
def extract_to_file(self, label, path, overwrite=False):
""" Extract a file record to file.
Parameters
----------
label : str
Label of the file record.
path : str
The path to which the file is to be written.
overwrite : bool, optional
Unless specified as True, an existing file with the chosen name
will not be overwritten by this method.
Raises
------
IOError if `overwrite` is False and the destination file exists.
"""
if op.exists(path) and not overwrite:
raise IOError("File '{}' exists. Will not overwrite.".format(path))
self._validate_labels(label, must_exist=True)
# Check that archive is a file archive
record_type = self._get_attr('RecordType', root=label)
if record_type != 'file':
raise ValueError("'{}' is not a file record".format(label))
with open(path, 'wb') as f:
f.write(self.extract(label))
def insert(self, label, data, description='', deflate=0,
as_structures=False):
""" Insert data into an SDA file.
Parameters
----------
label : str
The data label.
data :
The data to insert. See the notes below.
description : str, optional
A description to accompany the data
deflate : int, optional
An integer value from 0 to 9, specifying the compression level to
be applied to the stored data.
as_structures : bool, optional
If specified, data that is storable as a cell record and has
homogeneous cells will be stored as a "structures" record. Note that
this does not extend to nested cell records.
Raises
------
ValueError if the data is of an unsupported type
ValueError if the label contains invalid characters
ValueError if the label exists
ValueError if `as_structures` is True and the data cannot be stored as
a structures record.
Notes
-----
This stores specific data types as described here.
sequences :
Lists, tuples, and anything else that identifies as a
collections.abc.Sequence are stored as 'cell' records, no matter
the contents.
dicts :
Dictionaries are stored as 'structure' records.
numpy arrays :
If the dtype is a supported numeric type, then a numpy array is
stored as a 'numeric' record. Arrays of 'bool' type are stored as
'logical' records. Arrays of characters (dtype 'S1') are stored as
'character' records. Object and string arrays are stored as 'cell'
records.
sparse arrays (:class:`coo_matrix<scipy:scipy.sparse.coo_matrix>`) :
These are stored as 'numeric' records if the dtype is a type
supported for numeric numpy arrays.
strings :
Strings are stored as 'character' records. An attempt will be
made to convert the input to ascii encoded bytes, no matter the
underlying encoding. This may result in an encoding exception if
the input cannot be ascii encoded.
non-string scalars :
Non-string scalars are stored as 'numeric' if numeric, or 'logical'
if boolean.
file-like :
The contents of a file-like objects (with a 'read' method) are
stored as 'file' records.
other :
Arrays of characters are not supported. Convert to a string.
Object arrays are not supported. Cast to another dtype or turn into
a list.
Anything not listed above is not (intentionally) supported.
See Also
--------
insert_from_file : Insert contents of a named file.
"""
self._validate_can_write()
self._validate_labels(label, can_exist=False)
if not isinstance(deflate, (int, np.integer)) or not 0 <= deflate <= 9:
msg = "'deflate' must be an integer from 0 to 9"
raise ValueError(msg)
cls = self._registry.get_inserter(data)
if cls is None:
msg = "{!r} is not a supported type".format(data)
raise ValueError(msg)
inserter = cls(label, data, deflate, self._registry)
if as_structures:
if inserter.record_type != 'cell':
msg = "Data cannot be stored as a 'structures' record."
raise ValueError(msg)
validate_structures(data, self._registry)
# Tell the inserter to use the 'structures' record type
inserter.record_type = 'structures'
with self._h5file('r+') as h5file:
try:
inserter.insert(h5file, description)
except Exception:
# Do not leave things in an invalid state
if label in h5file:
del h5file[label]
raise
else:
update_header(h5file.attrs)
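# Hedged usage sketch for insert() (kept entirely in comments because we are still
# inside the class body; the import path for SDAFile is an assumption):
#
#     import numpy as np
#     sda = SDAFile('example.sda', 'w')                      # new archive
#     sda.insert('numbers', np.arange(10.0), 'demo data')    # numeric record
#     sda.insert('settings', {'alpha': 1.5, 'tag': 'run1'})  # structure record
#     sda.labels()                                           # ['numbers', 'settings']
#     sda.extract('numbers')                                 # retrieves the stored data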
def insert_from_file(self, path, description='', deflate=0):
""" Insert the contents of a file as a file record.
Parameters
----------
path : str
The path to the file. The basename of the path will be used as the
record label.
description : str, optional
A description to accompany the data
deflate : int, optional
An integer value from 0 to 9, specifying the compression level to
be applied to the stored data.
Returns
-------
label : str
The label under which the file was stored.
See Also
--------
insert : Insert data into the archive
"""
if not op.isfile(path):
raise ValueError("File '{}' does not exist".format(path))
label = op.basename(path)
with open(path, 'rb') as f:
self.insert(label, f, description, deflate)
return label
def labels(self):
""" Get data labels from the archive.
Returns
-------
labels : list of str
Labels from the archive.
"""
with self._h5file('r') as h5file:
return list(h5file.keys())
def remove(self, *labels):
""" Remove specified records from the archive.
This cannot be undone.
"""
self._validate_can_write()
self._validate_labels(labels, must_exist=True)
# Create a new file so space is actually freed
def _copy_visitor(path, source, destination, labels):
""" Visitor that copies data from source to destination """
# Skip paths corresponding to excluded labels
if path.split('/')[0] in labels:
return
# Copy everything else
source_obj = source[path]
if isinstance(source_obj, h5py.Group):
dest_obj = destination.create_group(path)
else:
ds = source_obj
dest_obj = destination.create_dataset(
path,
data=source_obj[()],
chunks=ds.chunks,
maxshape=ds.maxshape,
compression=ds.compression,
compression_opts=ds.compression_opts,
scaleoffset=ds.scaleoffset,
shuffle=ds.shuffle,
fletcher32=ds.fletcher32,
fillvalue=ds.fillvalue,
)
dest_obj.attrs.update(source_obj.attrs)
pid, destination_path = tempfile.mkstemp()
os.close(pid)
with h5py.File(destination_path, 'w') as destination:
with self._h5file('r') as source:
destination.attrs.update(source.attrs)
source.visit(
partial(
_copy_visitor,
source=source,
destination=destination,
labels=set(labels),
)
)
update_header(destination.attrs)
shutil.move(destination_path, self._filename)
def probe(self, pattern=None):
""" Summarize the state of the archive
This requires the pandas package.
Parameters
----------
pattern : str or None, optional
A search pattern (python regular expression) applied to find
archive labels of interest. If None, all labels are selected.
Returns
-------
summary : :class:`DataFrame<pandas:pandas.DataFrame>`
A table summarizing the archive.
"""
from pandas import DataFrame
labels = self.labels()
if pattern is not None:
regex = re.compile(pattern)
labels = [
label for label in labels if regex.match(label) is not None
]
summary = []
with self._h5file('r') as h5file:
for label in labels:
g = h5file[label]
attrs = get_decoded(g.attrs)
if label in g:
attrs.update(get_decoded(g[label].attrs))
attrs['label'] = label
summary.append(attrs)
cols = [
'label', 'RecordType', 'Description', 'Empty', 'Deflate',
'Complex', 'ArraySize', 'Sparse', 'RecordSize', 'Class',
'FieldNames', 'Command',
]
return | DataFrame(summary, columns=cols) | pandas.DataFrame |
# Cluster EB Project
# Marin-French: GCs
# Salaris, Piskunov: OCs
"""Parameters we want:
name, RA, Dec, distance from galactic center, distance from Earth, size (ideally a half-mass radius),
number of stars, age, reddening (Av or E(B-V)), metallicity, central density, central velocity dispersion,
concentration parameter"""
from pandas import DataFrame
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy import units as u
from astropy.coordinates import SkyCoord, Distance, match_coordinates_sky
import glob
# Making .txt file for all data files in directory
path = '/Users/andrewbowen/ceb_project/data/'
filenames = ['Piskunov2008_M.Kharchenko2013_Rhm_FeH.txt',\
'Solaris2004_viaWEBDA_plusvandenbergh2006_diam_dist.txt', "WEBDA-OC-table-June2013_DavidJames.txt", "mwgc1.txt", "mwgc2.txt", "mwgc3.txt"]
############################# Open Clusters #################################################################################
# WEBDA data file (2013)
webda_df = pd.read_fwf(path + "OC_data/WEBDA-OC-table-June2013_DavidJames.txt", widths = [18,14,15,11,9,8,8,8,9,6,9,9,9,7,7,9], header = 0, \
names = ['name', 'RA', 'DEC' , 'l', 'b', 'Dist', 'Mod', 'EB-V',\
'Age','ST', 'Z', 'Diam','Fe/H','MRV','pm RA','pm Dec']) #reading in WEBDA-OC-table-June2013_DavidJames.txt datafile
webda_ID = webda_df['name']
webda_ID = webda_ID.str.replace( ' ','_' )#,regex = True)
webda_df['name'] = webda_ID
webda_df['w flag'] = ['W' for x in range(0,len(webda_df))]
# Converting WEBDA Diameter to pc from arcmin using distance (for later)
webda_diam = webda_df['Diam'].values * u.arcmin
# webda diameter conversion to radians
webda_diam_rad = webda_diam.to(u.radian)
webda_dist = webda_df['Dist'].values * u.pc#webda distance (given in pc)
# Conversion
webda_Diam = np.tan(webda_diam_rad) * webda_dist
W_D = pd.Series(webda_Diam)
# Setting values in WEBDA dataframe to pc diameters (linear size) - needed in cluster_muster.ipynb
webda_df['Diam'] = W_D
# print(webda_df['Diam'].to_string())
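# Quick illustrative check of the conversion above (made-up numbers): a cluster with a
# 10 arcmin apparent diameter at a distance of 1000 pc has a linear diameter of ~2.91 pc,
# since linear size = distance * tan(angular size).
demo_theta = (10.0 * u.arcmin).to(u.radian)
demo_dist = 1000.0 * u.pc
print(np.tan(demo_theta) * demo_dist)  # ~2.909 pc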
# Piskunov (2008)
piskunov_df = pd.read_csv(path + 'OC_data/PiskunovData_withCoords.csv', sep = ',', header = 0,\
names = ['name','RA','Dec','logM[Msun]', 'rtP[pc]', 'log(t[yr])K', 'rcK[pc]','rtK[pc]', 'Rhm[pc]', '[Fe/H]K]', 'distanceK[pc]'])
piskunov_df['p flag'] = ['P' for x in range(0,len(piskunov_df))]
# Solaris (2014)
# Had to add last 2 column names from <NAME> (2006): Diam[pc] = diameter in parsecs, d[pc] = distance in parsecs
solaris_df = pd.read_csv(path + 'OC_data/SalarisData_withCoords.csv', sep = ',', header = 0,\
names = ['name', 'RA', 'Dec','deltaV', 'sigdV', '[FeH]', 'sigFeH', 't', 'sigt', 'logt', 'Rgc', 'z','Diam[pc]', 'd[pc]'])
solaris_df['s flag'] =['S' for x in range(0,len(solaris_df))]
# Merging Solaris and Piskunov dataframes
PS = piskunov_df.merge(solaris_df, on = 'name', how = 'outer') #works to merge 2 dfs with overlap -> Solaris/Piskunov
# print(PS)
Pisk_RA = PS['RA_x']
Pisk_Dec = PS['Dec_x']
Sol_RA = PS['RA_y']
Sol_Dec = PS['Dec_y']
PS_RA = Pisk_RA.combine_first(Sol_RA)
PS_Dec = Pisk_Dec.combine_first(Sol_Dec)
# Setting PS column values to new combined RA/Dec series
PS['RA'] = PS_RA
PS['Dec'] = PS_Dec
# # Dropping old labels and adding combined series to df
PS = PS.drop(labels = ['RA_x', 'Dec_x','RA_y', 'Dec_y'], axis =1)
# print(PS)
# Merging Solaris/Piskunov df with with WEDBA df
# OCs = webda_df.join(PS.set_index('name'), on = 'name', how = 'outer') #Should give all open clusters
OCs = webda_df.merge(PS, on = 'name', how = 'outer')
# Merging RA/Dec columns into one column
webda_RA = OCs['RA_x']
webda_Dec = OCs['DEC']
Sol_RA = OCs['RA_y']
Sol_Dec = OCs['Dec']
OC_RA = webda_RA.combine_first(Sol_RA)#combining the two series (webda values taken if both present)
OC_Dec = webda_Dec.combine_first(Sol_Dec)
OCs['RA'] = OC_RA
# print(OCs)
OCs = OCs.drop(labels = ['RA_x', 'DEC','RA_y', 'Dec'], axis = 1)
# Resetting declination column to our good merged values
OCs['Dec'] = OC_Dec
new_OC_cols = ['name', 'RA', 'Dec', 'l', 'b', 'Dist', 'Mod', 'EB-V', 'Age', 'ST', 'Z', 'Diam',\
'Fe/H', 'MRV', 'pm RA', 'pm Dec', 'w flag', 'logM[Msun]', 'rtP[pc]',\
'log(t[yr])K', 'rcK[pc]', 'rtK[pc]', 'Rhm[pc]', '[Fe/H]K]',\
'distanceK[pc]', 'p flag', 'deltaV', 'sigdV', '[FeH]', 'sigFeH', 't',\
'sigt', 'logt', 'Rgc', 'z', 'Diam[pc]', 'd[pc]', 's flag']
# Resetting OC column order - Coordinates after name
OCs = OCs[new_OC_cols]
needed_cols = ['name', 'RA', 'Dec', 'w flag', 'p flag', 's flag']
# Dropping Rows where we can't find an RA anywhere
indexNames = OCs[(OCs['RA'].isna()) & (OCs['Dec'].isna())].index # missing coordinates are NaN values, not the string 'NaN'
OCs = OCs.drop(indexNames, axis = 0)
print(OCs[1760:1761])
# Missing RA/Dec values (from WEBDA database)
missingRA = []
missingDec = []
############################### Globular Clusters ##########################################################################
# Reading in mwgc data files separately then create one df with all 3 tables
mwgc1 = pd.read_fwf(path + "GC_data/mwgc1.txt", widths = [12,13,13,14,8,8,6,6,6,6,6], header = 0, \
                    names = ['ID', 'Name','RA','DEC','L', 'B', 'R_Sun', 'R_gc', 'X','Y','Z']) #widths are column widths in mwgc1 file, using read_fwf bc of funky 'Name' column in this file
mwgc2 = pd.read_fwf(path + "GC_data/mwgc2.txt", widths = [12,7,4,5,6,6,7,7,7,6,6,6,5,6], header = 0, \
                    names = ['ID','[Fe/H]', 'wt','E(B-V)', 'V_HB','(m-M)V', 'V_t', 'M_V,t','U-B','B-V','V-R', 'V-I', 'spt','ellip'])
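# Sketch of the "one df with all 3 tables" step described in the comment above, assuming the mwgc
# tables share the 'ID' column; the widths for mwgc3 and the exact join may differ in the real script:
# mwgc3 = pd.read_fwf(path + "GC_data/mwgc3.txt", widths=[...], header=0, names=[...])
# GCs = mwgc1.merge(mwgc2, on='ID', how='outer').merge(mwgc3, on='ID', how='outer')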
import tempfile
import unittest
import numpy as np
import pandas as pd
from airflow import DAG
from datetime import datetime
from mock import MagicMock, patch
import dd.api.workflow.dataset
from dd import DB
from dd.api.workflow.actions import Action
from dd.api.workflow.sql import SQLOperator
dd.api.workflow.dataset.is_ipython = lambda: True
dd.api.workflow.actions.is_ipython = lambda: True
from dd.api.contexts.distributed import AirflowContext
from dd.api.workflow.dataset import Dataset, DatasetLoad, DatasetTransformation
class TestDataset(unittest.TestCase):
def setUp(self):
self.workflow = MagicMock(spec_set=DAG("test_workflow", start_date=datetime.now()))
self.workflow.dag_id = "mock"
self.db = MagicMock()
self.db.query.result_value = None
def test_creating_dataset_should_add_task_to_workflow(self):
# Given
workflow = self.workflow
db = self.db
# When
_ = AirflowContext(workflow, db).create_dataset("table")
# Assert
workflow.add_task.assert_called_once()
def test_apply_method_should_run(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 7], [6, 7]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
self.assertEqual(arg1, 1)
self.assertEqual(arg2, 2)
self.assertEqual(arg3, 3)
odf = indf.applymap(lambda t: t + arg1 + arg2 + arg3)
self.assertTrue(odf.equals(expected_result1))
# When a valid execution
new_action = dataset.apply(my_apply_function, 1, 2, 3)
# Assert
self.assertFalse(new_action.executed)
new_action.execute()
def test_apply_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
# With a function with only args
def my_apply_function(indf, arg1, arg2, arg3):
pass
# When
new_action = dataset.apply(my_apply_function, 1, 2)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_action = dataset.apply(my_apply_function)
# Assert
self.assertFalse(new_action.executed)
with self.assertRaises(TypeError) as context:
new_action.execute()
possible_exceptions = ["my_apply_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_apply_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
def test_transform_method_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.transform(lambda x: x)
# Assert
self.assertIsNot(new_dataset, dataset)
self.assertIsInstance(new_dataset, Dataset)
def test_transform_method_should_handle_optional_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
dataset2 = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
# With a function with only args
def my_transform_function(indf, df2, arg1=0):
return indf.applymap(lambda t: t + arg1)
# When
new_dataset = dataset.transform(my_transform_function,
arg1=1,
output_table="mytable",
datasets=[dataset2],
write_options=dict(if_exists="replace"))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
self.assertTrue(new_dataset.output_table == "mytable")
def test_transform_method_should_raise_when_invalid_number_args(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 4], [3, 4]])
# With a function with only args
def my_transform_function(indf, arg1, arg2, arg3):
return indf.applymap(lambda t: t + arg1 + arg2 + arg3)
# When
new_dataset = dataset.transform(my_transform_function, 1, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 1 required positional argument: 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (3 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
with self.assertRaises(TypeError) as context:
new_dataset.execute()
possible_exceptions = ["my_transform_function() missing 3 required positional arguments: 'arg1', 'arg2', and 'arg3'", # msg Python 3
"my_transform_function() takes exactly 4 arguments (1 given)"] # msg Python 2
self.assertIn(str(context.exception), possible_exceptions)
# When
new_dataset = dataset.transform(my_transform_function, 1, 1, 1)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
# Finally
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
def test_transform_method_should_handle_args_kwargs(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
# With a function with arg and kwargs
def mytransfun(indf, myarg1, mynamedarg1=1):
return indf.applymap(lambda t: t + myarg1 - mynamedarg1)
# When
new_dataset = dataset.transform(mytransfun, 2)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
# When
new_dataset = dataset.transform(mytransfun, 2, mynamedarg1=0)
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertFalse(new_dataset.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_apply_function_to_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
self.db.retrieve_table.return_value = pd.DataFrame([[np.nan, 1],
[0, 1]])
dataset2 = context.create_dataset("table")
expected_result1 = pd.DataFrame([[np.nan, 2], [1, 2]])
expected_result2 = pd.DataFrame([[0.0, 1], [0.0, 1]])
# When
new_dataset = dataset.transform(lambda x: x.applymap(lambda t: t + 1))
new_dataset2 = dataset2.transform(lambda df: df.fillna(0))
# Assert
self.assertIsNone(new_dataset.dataframe)
self.assertIsNone(new_dataset2.dataframe)
self.assertFalse(new_dataset.executed)
self.assertFalse(new_dataset2.executed)
new_dataset.execute()
new_dataset.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result1
))
new_dataset2.execute()
new_dataset2.collect()
self.assertTrue(self.db.import_dataframe.call_args[0][0].equals(
expected_result2
))
def test_transform_method_should_be_able_to_process_multiple_datasets(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset1 = context.create_dataset("table")
dataset2 = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
new_dataset = dataset1.transform(mock_function, datasets=[dataset2])
# When
new_dataset.execute()
new_dataset.collect()
# Check
args, kwargs = mock_function.call_args
self.assertTrue(args[0], dataset1)
self.assertTrue(args[1], dataset2)
def test_collect_should_return_dataframe_attribute_when_non_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
initial_dataframe = pd.DataFrame([[0.0, 1], [0.0, 1]])
dataset.dataframe = initial_dataframe
# When
dataframe = dataset.collect()
# Assert
self.assertIsInstance(dataframe, pd.DataFrame)
self.assertTrue(dataframe.equals(initial_dataframe))
def test_collect_should_call_db_retrieve_table_when_empty(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
output_table = "output_table"
dataset.output_table = output_table
# When
dataset.collect()
# Assert
self.db.retrieve_table.assert_called_once_with(output_table)
def test_split_train_test_should_return_two_datasets(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
train, test = dataset.split_train_test()
# Assert
self.assertIsInstance(train, Dataset)
self.assertIsInstance(test, Dataset)
def test_join_should_return_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset_left = context.create_dataset("table")
dataset_right = context.create_dataset("table")
# When
join = dataset_left.join(dataset_right)
# Check
self.assertIsInstance(join, Dataset)
def test_execute_should_call_operator_execute_once(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute()
# Check
dataset.operator.execute.assert_called_once()
def test_execute_with_force_should_call_operator_execute_twice(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table").transform(lambda x: x)
dataset.operator = MagicMock()
# When
dataset.execute()
dataset.execute(force=True)
# Check
self.assertEqual(dataset.operator.execute.call_count, 2)
def test_execute_when_operator_is_DDOperator_should_return_resulted_dataframe_from_operator_get_result(self):
# Given
dataset = Dataset(MagicMock(), 'output')
dataset.executed = False
dataset.operator = MagicMock()
dataset.operator.execute = lambda: 'output_table'
dataset.operator.get_result = lambda: 'Dataframe'
dataset.operator.set_upstream = None
# When
result = dataset.execute()
# Check
self.assertEqual(result, 'Dataframe')
def test_transform_with_if_exists_should_append_to_existing_table(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
new_dataset = dataset.transform(lambda x: x,
write_options=dict(if_exists="append"))
# When
new_dataset.execute()
# Check
self.assertIn("if_exists", self.db.import_dataframe.call_args[1])
self.assertEqual(self.db.import_dataframe.call_args[1]["if_exists"],
"append")
def test_select_columns_should_create_new_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"])
# Check
self.assertIsInstance(new_dataset, Dataset)
self.assertIsNot(new_dataset, dataset)
def test_default_is_cached_should_match_context_auto_persistence(self):
# Given
persisted_context = MagicMock()
persisted_context.auto_persistence = True
unpersisted_context = MagicMock()
unpersisted_context.auto_persistence = False
# When
persisted_dataset = Dataset(persisted_context, "foo")
unpersisted_dataset = Dataset(unpersisted_context, "bar")
# Check
self.assertTrue(persisted_dataset.is_cached)
self.assertFalse(unpersisted_dataset.is_cached)
def test_is_cached_attribute_may_be_set_by_cache_method(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
dataset.cache()
# Check
self.assertTrue(dataset.is_cached)
# Then when
dataset.cache(boolean=False)
# Check
self.assertFalse(dataset.is_cached)
def test_memory_usage_returns_integer(self):
# Given
context = MagicMock()
context.auto_persistence = False
dataset = Dataset(context, "foo")
# When
usage = dataset.memory_usage
# Check
self.assertIsInstance(usage, int)
def test_providing_output_table_in_select_columns_must_set_output_table(
self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.select_columns(["foo", "bar"],
output_table="myoutput.table")
# Check
self.assertEqual(new_dataset.output_table, "myoutput.table")
def test_sql_query_should_return_dataset(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
new_dataset = dataset.sql.query("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(new_dataset, Dataset)
def test_sql_query_should_call_db_query(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
qw = dataset.sql.query("SELECT * FROM foo.bar")
qw.execute() # In airflow context we force execution
qw.head()
# Check
self.db.query.assert_called_once_with("SELECT * FROM foo.bar")
def test_sql_execute_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
action = dataset.sql.execute("SELECT * FROM foo.bar")
# Check
self.assertIsInstance(action, Action)
def test_sql_execute_should_call_db_execute(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
action = dataset.sql.execute("SELECT * FROM foo.bar")
# When
action.execute(force=True)
# Check
self.db.execute.assert_called_once_with("SELECT * FROM foo.bar")
def test_apply_should_return_action(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
mock_function = MagicMock()
mock_function.__name__ = "mock"
# When
result = dataset.apply(mock_function)
# Check
self.assertIsInstance(result, Action)
def test_sql_should_be_SQLOperator(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIsInstance(result, SQLOperator)
def test_sql_should_have_same_context(self):
# Given
context = AirflowContext(self.workflow, self.db)
dataset = context.create_dataset("table")
# When
result = dataset.sql
# Check
self.assertIs(result.context, dataset.context)
def test_multitransform_method_should_allow_multiple_output_datasets(self):
# Given
with tempfile.NamedTemporaryFile() as tmp:
workflow = DAG("test_workflow", start_date=datetime.now())
db = DB(dbtype='sqlite', filename=tmp.name)
ctx = AirflowContext(workflow, db)
# given
df = pd.DataFrame([[np.nan, 2], [1, 2]])
df.columns = map(lambda x: "num_" + str(x), df.columns)
expected_result2 = pd.DataFrame([[np.nan, 3], [2, 3]])
expected_result2.columns = map(lambda x: "num_" + str(x), expected_result2.columns)
db.import_dataframe(df, "test_num", index=False)
dataset = ctx.table("test_num")
# when
def my_multiple_output(indf):
return indf, indf + 1
new_df1, new_df2 = dataset.multitransform(my_multiple_output, output_tables=["odf1", "odf2"])
# then
self.assertIsNone(new_df1.dataframe)
self.assertFalse(new_df1.executed)
self.assertIsNone(new_df2.dataframe)
self.assertFalse(new_df2.executed)
# finally
new_df1.execute()
# same result
odf1 = new_df1.collect()
odf2 = new_df2.collect()
pd.testing.assert_frame_equal(odf1, df)
            pd.testing.assert_frame_equal(odf2, expected_result2)
import os
import pandas as pd
from torch.nn import MSELoss
from easydict import EasyDict as edict
from readability_transformers import ReadabilityTransformer
from readability_transformers.readers import PairwiseDataReader, PredictionDataReader
from readability_transformers.dataset import CommonLitDataset
from readability_transformers.losses import WeightedRankingMSELoss
TEST_CSV_PATH = 'readability_transformers/dataset/data/test.csv'
OUTPUT_CSV_PATH = './'
def get_test_df():
commonlit_data = CommonLitDataset("test")
return commonlit_data.data
def inference_on_dataset():
model = ReadabilityTransformer(
model_path="checkpoints/ablations/eval_twostep/no_twostep/no_twostep_1",
device="cpu",
double=True
)
test_df = get_test_df()
ids = test_df["id"].values
passages = test_df["excerpt"].values
predictions = model.predict(passages, batch_size=3)
    # making commonlit submission
submission = []
for one in zip(ids, predictions.tolist()):
one_id, one_prediction = one
one_submit = {
"id": one_id,
"target": one_prediction
}
submission.append(one_submit)
    submission = pd.DataFrame(submission)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2014-2019 OpenEEmeter contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import numpy as np
import pandas as pd
import pytest
from eemeter.caltrack.usage_per_day import (
CalTRACKUsagePerDayCandidateModel,
CalTRACKUsagePerDayModelResults,
DataSufficiency,
_caltrack_predict_design_matrix,
fit_caltrack_usage_per_day_model,
caltrack_usage_per_day_predict,
caltrack_sufficiency_criteria,
get_intercept_only_candidate_models,
get_too_few_non_zero_degree_day_warning,
get_total_degree_day_too_low_warning,
get_parameter_negative_warning,
get_parameter_p_value_too_high_warning,
get_cdd_only_candidate_models,
get_hdd_only_candidate_models,
get_cdd_hdd_candidate_models,
select_best_candidate,
)
from eemeter.exceptions import MissingModelParameterError, UnrecognizedModelTypeError
from eemeter.features import (
compute_time_features,
compute_temperature_features,
compute_usage_per_day_feature,
merge_features,
)
from eemeter.metrics import ModelMetrics
from eemeter.warnings import EEMeterWarning
from eemeter.transform import day_counts, get_baseline_data
def test_candidate_model_minimal():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
assert candidate_model.model_type == "model_type"
assert candidate_model.formula == "formula"
assert candidate_model.status == "status"
assert candidate_model.model_params == {}
assert candidate_model.warnings == []
assert str(candidate_model).startswith("CalTRACKUsagePerDayCandidateModel")
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
def test_candidate_model_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
)
assert candidate_model.json() == {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_candidate_model_json_none_and_nan_values():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
warnings=[eemeter_warning],
r_squared_adj=None,
)
assert candidate_model.json()["r_squared_adj"] is None
candidate_model.r_squared_adj = np.nan
assert candidate_model.json()["r_squared_adj"] is None
def test_data_sufficiency_minimal():
data_sufficiency = DataSufficiency(status="status", criteria_name="criteria_name")
assert data_sufficiency.status == "status"
assert data_sufficiency.criteria_name == "criteria_name"
assert data_sufficiency.warnings == []
assert data_sufficiency.settings == {}
assert str(data_sufficiency).startswith("DataSufficiency")
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"data": {},
"settings": {},
"status": "status",
"warnings": [],
}
def test_data_sufficiency_json_with_warning():
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
data_sufficiency = DataSufficiency(
status="status", criteria_name="criteria_name", warnings=[eemeter_warning]
)
assert data_sufficiency.json() == {
"criteria_name": "criteria_name",
"settings": {},
"status": "status",
"data": {},
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_minimal():
model_results = CalTRACKUsagePerDayModelResults(
status="status", method_name="method_name"
)
assert model_results.status == "status"
assert model_results.method_name == "method_name"
assert model_results.model is None
assert model_results.r_squared_adj is None
assert model_results.candidates == []
assert model_results.warnings == []
assert model_results.metadata == {}
assert model_results.settings == {}
assert str(model_results).startswith("CalTRACKUsagePerDayModelResults")
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": None,
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_objects():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status"
)
eemeter_warning = EEMeterWarning(
qualified_name="qualified_name", description="description", data={}
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
candidates=[candidate_model],
warnings=[eemeter_warning],
)
assert model_results.json(with_candidates=True) == {
"candidates": [
{
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
}
],
"metadata": {},
"interval": None,
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [
{
"data": {},
"description": "description",
"qualified_name": "qualified_name",
}
],
}
def test_model_results_json_with_nan_r_squared_adj():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type",
formula="formula",
status="status",
r_squared_adj=np.nan,
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
assert model_results.json() == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": None,
"avgs_metrics": None,
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": None,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
def test_model_results_json_with_model_metrics():
candidate_model = CalTRACKUsagePerDayCandidateModel(
model_type="model_type", formula="formula", status="status", r_squared_adj=0.5
)
model_results = CalTRACKUsagePerDayModelResults(
status="status",
method_name="method_name",
model=candidate_model,
r_squared_adj=np.nan,
)
model_metrics = ModelMetrics(
observed_input=pd.Series([0, 1, 2]), predicted_input=pd.Series([1, 0, 2])
)
json_result = model_results.json()
json.dumps(json_result) # just make sure it's valid json
assert "totals_metrics" in json_result
assert "avgs_metrics" in json_result
json_result["totals_metrics"] = {} # overwrite because of floats
json_result["avgs_metrics"] = {} # overwrite because of floats
assert json_result == {
"candidates": None,
"interval": None,
"metadata": {},
"method_name": "method_name",
"totals_metrics": {},
"avgs_metrics": {},
"model": {
"formula": "formula",
"model_params": {},
"model_type": "model_type",
"r_squared_adj": 0.5,
"status": "status",
"warnings": [],
},
"settings": {},
"status": "status",
"r_squared_adj": None,
"warnings": [],
}
@pytest.fixture
def utc_index():
return pd.date_range("2011-01-01", freq="H", periods=365 * 24 + 1, tz="UTC")
@pytest.fixture
def temperature_data(utc_index):
series = pd.Series(
[
30.0 * ((i % (365 * 24.0)) / (365 * 24.0)) # 30 * frac of way through year
+ 50.0 # range from 50 to 80
for i in range(len(utc_index))
],
index=utc_index,
)
return series
@pytest.fixture
def prediction_index(temperature_data):
return temperature_data.resample("D").mean().index
@pytest.fixture
def candidate_model_no_model_none():
return CalTRACKUsagePerDayCandidateModel(
model_type=None,
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_no_model_none(
candidate_model_no_model_none, prediction_index, temperature_data
):
with pytest.raises(ValueError):
candidate_model_no_model_none.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_intercept_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1},
)
def test_caltrack_predict_intercept_only(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert prediction["predicted_usage"].sum() == 365
assert sorted(prediction.columns) == ["predicted_usage"]
def test_caltrack_predict_intercept_only_with_disaggregated(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert prediction["base_load"].sum() == 365.0
assert prediction["cooling_load"].sum() == 0.0
assert prediction["heating_load"].sum() == 0.0
assert prediction["predicted_usage"].sum() == 365.0
assert sorted(prediction.columns) == [
"base_load",
"cooling_load",
"heating_load",
"predicted_usage",
]
def test_caltrack_predict_intercept_only_with_design_matrix(
candidate_model_intercept_only, prediction_index, temperature_data
):
model_prediction = candidate_model_intercept_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0.0
assert prediction.n_days_kept.sum() == 365
assert prediction.predicted_usage.sum() == 365.0
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_missing_params():
return CalTRACKUsagePerDayCandidateModel(
model_type="intercept_only",
formula="formula",
status="QUALIFIED",
model_params={},
)
def test_caltrack_predict_missing_params(
candidate_model_missing_params, prediction_index, temperature_data
):
with pytest.raises(MissingModelParameterError):
candidate_model_missing_params.predict(prediction_index, temperature_data)
@pytest.fixture
def candidate_model_cdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_cdd": 1, "cooling_balance_point": 65},
)
def test_caltrack_predict_cdd_only(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data
)
prediction_df = model_prediction.result
assert round(prediction_df.predicted_usage.sum()) == 1733
def test_caltrack_predict_cdd_only_with_disaggregated(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 0.0
assert round(prediction.cooling_load.sum()) == 1368.0
def test_caltrack_predict_cdd_only_with_design_matrix(
candidate_model_cdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_65.sum()) == 1368.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1733
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_hdd_only():
return CalTRACKUsagePerDayCandidateModel(
model_type="hdd_only",
formula="formula",
status="QUALIFIED",
model_params={"intercept": 1, "beta_hdd": 1, "heating_balance_point": 65},
)
def test_caltrack_predict_hdd_only(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
def test_caltrack_predict_hdd_only_with_disaggregated(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 1369.0
assert round(prediction.cooling_load.sum()) == 0.0
def test_caltrack_predict_hdd_only_with_design_matrix(
candidate_model_hdd_only, prediction_index, temperature_data
):
model_prediction = candidate_model_hdd_only.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"hdd_65",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.hdd_65.sum()) == 1369.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1734
assert round(prediction.temperature_mean.mean()) == 65.0
@pytest.fixture
def candidate_model_cdd_hdd():
return CalTRACKUsagePerDayCandidateModel(
model_type="cdd_hdd",
formula="formula",
status="QUALIFIED",
model_params={
"intercept": 1,
"beta_hdd": 1,
"heating_balance_point": 60,
"beta_cdd": 1,
"cooling_balance_point": 70,
},
)
def test_caltrack_predict_cdd_hdd(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
def test_caltrack_predict_cdd_hdd_disaggregated(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_disaggregated=True
)
prediction = model_prediction.result
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.base_load.sum()) == 365.0
assert round(prediction.heating_load.sum()) == 609.0
assert round(prediction.cooling_load.sum()) == 608.0
def test_caltrack_predict_cdd_hdd_with_design_matrix(
candidate_model_cdd_hdd, prediction_index, temperature_data
):
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temperature_data, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert round(prediction.cdd_70.sum()) == 608.0
assert round(prediction.hdd_60.sum()) == 609.0
assert prediction.n_days.sum() == 365.0
assert prediction.n_days_dropped.sum() == 0
assert prediction.n_days_kept.sum() == 365.0
assert round(prediction.predicted_usage.sum()) == 1582.0
assert round(prediction.temperature_mean.mean()) == 65.0
def test_caltrack_predict_cdd_hdd_with_design_matrix_missing_temp_data(
candidate_model_cdd_hdd, il_electricity_cdd_hdd_billing_monthly
):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
prediction_index = meter_data.index[2:4]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temp_data = temperature_data["2015-11":"2016-03"]
temp_data_greater_90perc_missing = temp_data[
~(
(pd.Timestamp("2016-01-27T12:00:00", tz="utc") < temp_data.index)
& (temp_data.index < pd.Timestamp("2016-01-31T12:00:00", tz="utc"))
)
].reindex(temp_data.index)
model_prediction = candidate_model_cdd_hdd.predict(
prediction_index, temp_data_greater_90perc_missing, with_design_matrix=True
)
prediction = model_prediction.result
assert sorted(prediction.columns) == [
"cdd_70",
"hdd_60",
"n_days",
"n_days_dropped",
"n_days_kept",
"predicted_usage",
"temperature_mean",
]
assert prediction.shape == (0, 7)
@pytest.fixture
def candidate_model_bad_model_type():
return CalTRACKUsagePerDayCandidateModel(
model_type="unknown", formula="formula", status="QUALIFIED", model_params={}
)
def test_caltrack_predict_bad_model_type(
candidate_model_bad_model_type, temperature_data, prediction_index
):
with pytest.raises(UnrecognizedModelTypeError):
candidate_model_bad_model_type.predict(prediction_index, temperature_data)
def test_caltrack_predict_empty(
candidate_model_bad_model_type, temperature_data, prediction_index
):
model_prediction_obj = candidate_model_bad_model_type.predict(
prediction_index[:0], temperature_data[:0]
)
assert model_prediction_obj.result.empty is True
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_totals(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=False,
)
data = merge_features([meter_data, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_input_avg_false_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_totals
):
data = cdd_hdd_h54_c67_billing_monthly_totals
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=False,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
@pytest.fixture
def cdd_hdd_h54_c67_billing_monthly_avgs(il_electricity_cdd_hdd_billing_monthly):
meter_data = il_electricity_cdd_hdd_billing_monthly["meter_data"]
temperature_data = il_electricity_cdd_hdd_billing_monthly["temperature_data"]
temperature_features = compute_temperature_features(
meter_data.index,
temperature_data,
heating_balance_points=[54],
cooling_balance_points=[67],
use_mean_daily_values=True,
)
meter_data_feature = compute_usage_per_day_feature(meter_data)
data = merge_features([meter_data_feature, temperature_features])
return data
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_false(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=False,
)
assert round(prediction.mean(), 3) == 855.832
def test_caltrack_predict_design_matrix_input_avg_true_output_avg_true(
cdd_hdd_h54_c67_billing_monthly_avgs
):
data = cdd_hdd_h54_c67_billing_monthly_avgs
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert round(prediction.mean(), 3) == 28.253
def test_caltrack_predict_design_matrix_n_days(cdd_hdd_h54_c67_billing_monthly_totals):
# This makes sure that the method works with n_days when
# DatetimeIndexes are not available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
data["n_days"] = 1
prediction = _caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
assert prediction.mean() is not None
def test_caltrack_predict_design_matrix_no_days_fails(
cdd_hdd_h54_c67_billing_monthly_totals
):
# This makes sure that the method fails if neither n_days nor
# a DatetimeIndex is available.
data = cdd_hdd_h54_c67_billing_monthly_totals
data = data.reset_index(drop=True)
with pytest.raises(ValueError):
_caltrack_predict_design_matrix(
"cdd_hdd",
{
"intercept": 13.420093629452852,
"beta_cdd": 2.257868665412409,
"beta_hdd": 1.0479347638717025,
"cooling_balance_point": 67,
"heating_balance_point": 54,
},
data,
input_averages=True,
output_averages=True,
)
def test_get_too_few_non_zero_degree_day_warning_ok():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([1, 1, 1]),
minimum_non_zero=2,
)
assert warnings == []
def test_get_too_few_non_zero_degree_day_warning_fail():
warnings = get_too_few_non_zero_degree_day_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
degree_days=pd.Series([0, 0, 3]),
minimum_non_zero=2,
)
assert len(warnings) == 1
warning = warnings[0]
assert warning.qualified_name == (
"eemeter.caltrack_daily.model_type.too_few_non_zero_xdd"
)
assert warning.description == (
"Number of non-zero daily XDD values below accepted minimum."
" Candidate fit not attempted."
)
assert warning.data == {
"minimum_non_zero_xdd": 2,
"n_non_zero_xdd": 1,
"xdd_balance_point": 65,
}
def test_get_total_degree_day_too_low_warning_ok():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
avg_degree_days=pd.Series([1, 1, 1]),
period_days=pd.Series([3, 1, 2]),
minimum_total=4,
)
assert warnings == []
def test_get_total_degree_day_too_low_warning_fail():
warnings = get_total_degree_day_too_low_warning(
model_type="model_type",
balance_point=65,
degree_day_type="xdd",
        avg_degree_days=pd.Series([0.5, 0.5, 0.5]),
import multiprocess as mp
import pandas as pd
from itertools import combinations
import cooler
from bioframe import parse_regions
from .. import expected
import click
from . import cli
from . import util
# might be relevant to us ...
# https://stackoverflow.com/questions/46577535/how-can-i-run-a-dask-distributed-local-cluster-from-the-command-line
# http://distributed.readthedocs.io/en/latest/setup.html#using-the-command-line
@cli.command()
@click.argument(
"cool_path",
metavar="COOL_PATH",
type=str,
nargs=1
)
@click.option(
"--nproc", "-p",
    help="Number of processes to split the work between."
    " [default: 1, i.e. no process pool]",
default=1,
type=int,
)
@click.option(
"--chunksize", "-c",
help="Control the number of pixels handled by each worker process at a time.",
type=int,
default=int(10e6),
show_default=True,
)
@click.option(
"--output", "-o",
help="Specify output file name to store the expected in a tsv format.",
type=str,
required=False,
)
@click.option(
"--hdf",
help="Use hdf5 format instead of tsv."
" Output file name must be specified [Not Implemented].",
is_flag=True,
default=False,
)
@click.option(
"--contact-type", "-t",
    help="Compute expected for cis or trans regions of a Hi-C map."
    " Trans-expected is calculated for pairwise combinations of the specified regions.",
type=click.Choice(["cis", "trans"]),
default="cis",
show_default=True,
)
@click.option(
"--regions",
    help="Path to a BED file containing genomic regions "
    "for which expected will be calculated. Region names can "
    "be provided in a 4th column, otherwise UCSC notation is used. "
    "When not specified, expected is calculated for all chromosomes.",
type=click.Path(exists=True),
required=False,
)
@click.option(
"--balance/--no-balance",
    help="Apply balancing weights to data before calculating expected."
    " Bins masked in the balancing weights are excluded from calculations.",
is_flag=True,
default=True,
show_default=True,
)
@click.option(
"--weight-name",
help="Use balancing weight with this name.",
type=str,
default="weight",
show_default=True,
)
@click.option(
"--blacklist",
help="Path to a 3-column BED file containing genomic regions to mask "
"out during calculation of expected. Overwrites inference of "
"'bad' regions from balancing weights. [Not Implemented]",
type=click.Path(exists=True),
required=False,
)
@click.option(
"--ignore-diags",
help="Number of diagonals to neglect for cis contact type",
type=int,
default=2,
show_default=True,
)
def compute_expected(
cool_path,
nproc,
chunksize,
output,
hdf,
contact_type,
regions,
balance,
weight_name,
blacklist,
ignore_diags,
):
"""
Calculate expected Hi-C signal either for cis or for trans regions
of chromosomal interaction map.
    When balancing weights are not applied to the data, no masking of
    bad bins is performed.
    COOL_PATH : The path to a .cool file with a balanced Hi-C map.
"""
if blacklist is not None:
        raise NotImplementedError(
            "Custom genomic regions for masking from calculation of expected "
            "are not implemented."
)
# use blacklist-ing from cooler balance module
# https://github.com/mirnylab/cooler/blob/843dadca5ef58e3b794dbaf23430082c9a634532/cooler/cli/balance.py#L175
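    # Example invocation of this command (illustrative; assumes click exposes it as "compute-expected"
    # and that the .cool file has already been balanced):
    #   cooltools compute-expected -p 4 -o cis_expected.tsv mymap.cool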
clr = cooler.Cooler(cool_path)
if regions is None:
regions = [(chrom, 0, clr.chromsizes[chrom]) for chrom in clr.chromnames]
regions = parse_regions(regions)
regions["name"] = clr.chromnames
else:
regions_buf, names = util.sniff_for_header(regions)
regions = pd.read_csv(regions_buf, sep="\t", header=None)
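        # Expected tab-separated layout (hypothetical coordinates); the 4th name column is optional:
        #   chr1    0       248956422       chr1_full
        #   chr2    0       242193529       chr2_full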
if regions.shape[1] not in (3, 4):
            raise ValueError(
                "The region file does not have three or four tab-delimited columns. "
                "We expect a BED file with columns chrom, start, end, and an optional name."
)
if regions.shape[1] == 4:
regions = regions.rename(columns={0:"chrom",1:"start",2:"end",3:"name"})
regions = parse_regions(regions)
else:
regions = regions.rename(columns={0:"chrom",1:"start",2:"end"})
regions["name"] = list(regions.apply(lambda x: "{}:{}-{}".format(*x), axis=1))
regions = parse_regions(regions)
# define transofrms - balanced and raw ('count') for now
if balance:
weight1 = weight_name + "1"
weight2 = weight_name + "2"
transforms = {"balanced": lambda p: p["count"] * p[weight1] * p[weight2]}
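        # e.g. a pixel with raw count=8 whose bins carry balancing weights w1=0.02 and w2=0.03
        # contributes 8 * 0.02 * 0.03 = 0.0048 to the "balanced" column (illustrative numbers).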
else:
# no masking bad bins of any kind, when balancing is not applied
weight_name = None
transforms = {}
# execution details
if nproc > 1:
pool = mp.Pool(nproc)
map_ = pool.map
else:
map_ = map
# using try-clause to close mp.Pool properly
try:
if contact_type == "cis":
result = expected.diagsum(
clr,
regions,
transforms=transforms,
weight_name=weight_name,
bad_bins=None,
chunksize=chunksize,
ignore_diags=ignore_diags,
map=map_,
)
elif contact_type == "trans":
# prepare pairwise combinations of regions for trans-expected (blocksum):
regions_pairwise = combinations(regions.itertuples(index=False), 2)
regions1, regions2 = zip(*regions_pairwise)
result = expected.blocksum_asymm(
clr,
regions1 = pd.DataFrame(regions1),
                regions2 = pd.DataFrame(regions2),
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2 / 3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp
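# Minimal usage sketch for lm() (assumes x and y are equal-length pandas Series; values illustrative):
#   x = pd.Series(np.arange(10, dtype=float))
#   y = pd.Series(2.0 * x + np.random.normal(scale=0.5, size=10))
#   x_out, y_hat, ci_low, ci_upp = lm(x, y)   # OLS fit plus the 95% confidence band on the mean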
def lowess(x, y, span=SPAN):
    """Returns lowess-smoothed x and y values, plus a rough confidence band,
    estimated with the lowess function in statsmodels.

    For more see statsmodels.nonparametric.smoothers_lowess.lowess
    """
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y)
y2 = pd.Series(upper * std + y)
if x_is_date:
x = [pd.Timestamp.fromordinal(int(i)) for i in x]
return x, y, y1, y2
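# Usage sketch for lowess() (illustrative; span is the fraction of points used in each local fit):
#   x_s, y_s, y_lo, y_hi = lowess(x, y, span=0.5)   # smoothed curve plus a rough t-interval band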
def mavg(x,y, window):
"compute moving average"
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
    std_err = pd.rolling_std(y, window)
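    # Note: pd.rolling_std was removed in later pandas releases; the equivalent there is
    # y.rolling(window).std().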
# -*- coding: utf-8 -*-
"""
2021
@author: <NAME>
"""
## Import libraries
import json
import os
import pandas as pd
import requests
from bs4 import BeautifulSoup
import sys
## Define where output files have to be saved
working_dir_input = input("\n\nInput here the path to the directory where the output files have to be saved:") # for example: C:/Users/nvwetter/OneDrive - Vrije Universiteit Brussel/Documenten
os.chdir(working_dir_input) # set working directory where output files are to be saved
## DataCite clients
response_DataCite = requests.get("https://api.datacite.org/clients/", timeout=5)
DataCite_clients = []
DataCite_clients.append(json.loads(response_DataCite.text))
totalPages = DataCite_clients[0]['meta']['totalPages']
for a in range(0, totalPages):
request_link = DataCite_clients[a]['links']['next'] # use next to go to next page of API
response_DataCite_2 = requests.get(request_link, timeout=15)
DataCite_clients.append(json.loads(response_DataCite_2.text))
sys.stdout.write("\rCurrent loop - DataCite API requests: %d of %d" % ((a + 1), totalPages))
sys.stdout.flush()
DataCite_info = []
DataCite_clients_final = []
for b in range(0,len(DataCite_clients)):
for c in DataCite_clients[b]['data']:
DataCite_info.append([c['attributes']['name'], c['attributes']['symbol'], c['attributes']['url']])
DataCite_clients_final.append(c['attributes']['symbol'])
df_1 = pd.DataFrame(DataCite_info, columns=['repo_name', 'repo_symbol', 'repo_url'], dtype = float)
df_1.to_excel((working_dir_input + '/output_DataCite_clients.xlsx'), index=False, header=True)
## Harvest usage statistics for DataCite client (API request also includes counts for datasets and software specific)
PublicationYear = input("\n\nInput here the publication year for which you want to obtain usage statistics:")
DataCite_results = [] # collect responses from DataCite API
DataCite_results_datasets = []
DataCite_results_software = []
for d in range(0, len(DataCite_clients_final)):
response_DataCite_repo = requests.get(("https://api.datacite.org/dois?query=publicationYear:" + PublicationYear + "&client-id=" + DataCite_clients_final[d].lower()), timeout=60)
if response_DataCite_repo.status_code == 200:
DataCite_results.append(json.loads(response_DataCite_repo.text)['meta']['total']) # use meta-field to harvest total number of DOIs for that repository & publication year
try:
dataset_ok = None
software_ok = None
resource_types_counts = json.loads(response_DataCite_repo.text)['meta']['resourceTypes']
for resource_type in resource_types_counts:
if resource_type['id'] == "dataset":
DataCite_results_datasets.append(resource_type["count"])
dataset_ok = "ok"
break
for resource_type in resource_types_counts:
if resource_type['id'] == "software":
DataCite_results_software.append(resource_type["count"])
software_ok = "ok"
break
if dataset_ok != "ok":
DataCite_results_datasets.append("NA")
if software_ok != "ok":
DataCite_results_software.append("NA")
except KeyError:
DataCite_results_datasets.append("error")
DataCite_results_software.append("error")
else:
DataCite_results.append("error")
DataCite_results_datasets.append("error")
DataCite_results_software.append("error")
sys.stdout.write("\rCurrent loop - DataCite API requests: %d of %d" % ((d + 1), len(DataCite_clients_final)))
sys.stdout.flush()
data_tuples = list(zip(DataCite_clients_final, DataCite_results, DataCite_results_datasets, DataCite_results_software))
df_2 = pd.DataFrame(data_tuples, columns=['DataCite_clients_final', ('total_depo_' + PublicationYear), 'count_datasets', 'count_software'], dtype = float)
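# (sketch) The per-client totals can then be exported in the same way as df_1 above; the file
# name here is only an example:
# df_2.to_excel((working_dir_input + '/output_DataCite_counts_' + PublicationYear + '.xlsx'), index=False, header=True)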
import numpy as np, os, itertools
import pandas as pd
from rpy2 import robjects
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
import rpy2.robjects.pandas2ri
from rpy2.robjects.packages import importr
from selection.adjusted_MLE.cv_MLE import (sim_xy,
selInf_R,
glmnet_lasso,
BHfilter,
coverage,
relative_risk,
comparison_cvmetrics_selected,
comparison_cvmetrics_full,
comparison_cvmetrics_debiased)
def plotRisk(df_risk):
robjects.r("""
library("ggplot2")
library("magrittr")
library("tidyr")
library("dplyr")
plot_risk <- function(df_risk, outpath="/Users/psnigdha/adjusted_MLE/plots/", resolution=300, height= 7.5, width=15)
{
date = 1:length(unique(df_risk$snr))
df_risk = filter(df_risk, metric == "Full")
df = cbind(df_risk, date)
risk = df %>%
gather(key, value, sel.MLE, rand.LASSO, LASSO) %>%
ggplot(aes(x=date, y=value, colour=key, shape=key, linetype=key)) +
geom_point(size=3) +
geom_line(aes(linetype=key), size=1) +
ylim(0.01,1.2)+
labs(y="relative risk", x = "Signal regimes: snr") +
scale_x_continuous(breaks=1:length(unique(df_risk$snr)), label = sapply(df_risk$snr, toString)) +
theme(legend.position="top", legend.title = element_blank())
indices = sort(c("sel.MLE", "rand.LASSO", "LASSO"), index.return= TRUE)$ix
names = c("sel-MLE", "rand-LASSO", "LASSO")
risk = risk + scale_color_manual(labels = names[indices], values=c("#008B8B", "#104E8B","#B22222")[indices]) +
scale_shape_manual(labels = names[indices], values=c(15, 17, 16)[indices]) +
scale_linetype_manual(labels = names[indices], values = c(1,1,2)[indices])
        outfile = paste(outpath, 'risk.png', sep="")
ggsave(outfile, plot = risk, dpi=resolution, dev='png', height=height, width=width, units="cm")}
""")
robjects.pandas2ri.activate()
r_df_risk = robjects.conversion.py2ri(df_risk)
R_plot = robjects.globalenv['plot_risk']
R_plot(r_df_risk)
def plotCoveragePower(df_inference):
robjects.r("""
library("ggplot2")
library("magrittr")
library("tidyr")
library("reshape")
library("cowplot")
library("dplyr")
plot_coverage_lengths <- function(df_inference, outpath="/Users/psnigdha/adjusted_MLE/plots/",
resolution=200, height_plot1= 6.5, width_plot1=12,
height_plot2=13, width_plot2=13)
{
snr.len = length(unique(df_inference$snr))
df_inference = arrange(df_inference, method)
target = toString(df_inference$target[1])
df = data.frame(snr = sapply(unique(df_inference$snr), toString),
MLE = 100*df_inference$coverage[((2*snr.len)+1):(3*snr.len)],
Lee = 100*df_inference$coverage[1:snr.len],
Naive = 100*df_inference$coverage[((3*snr.len)+1):(4*snr.len)])
if(target== "selected"){
data.m <- melt(df, id.vars='snr')
coverage = ggplot(data.m, aes(snr, value)) +
geom_bar(aes(fill = variable), width = 0.4, position = position_dodge(width=0.5), stat="identity") +
geom_hline(yintercept = 90, linetype="dotted") +
labs(y="coverage: partial", x = "Signal regimes: snr") +
theme(legend.position="top",
legend.title = element_blank())
coverage = coverage +
scale_fill_manual(labels = c("MLE-based","Lee", "Naive"), values=c("#008B8B", "#B22222", "#FF6347"))} else{
df = cbind(df, Liu = 100*df_inference$coverage[((snr.len)+1):(2*snr.len)])
df <- df[c("snr", "MLE", "Liu", "Lee", "Naive")]
data.m <- melt(df, id.vars='snr')
coverage = ggplot(data.m, aes(snr, value)) +
geom_bar(aes(fill = variable), width = 0.4, position = position_dodge(width=0.5), stat="identity") +
geom_hline(yintercept = 90, linetype="dotted") +
labs(y="coverage: full", x = "Signal regimes: snr") +
theme(legend.position="top", legend.title = element_blank())
coverage = coverage +
scale_fill_manual(labels = c("MLE-based", "Liu", "Lee", "Naive"), values=c("#008B8B", "#104E8B", "#B22222", "#FF6347"))}
outfile = paste(outpath, 'coverage.png', sep="")
ggsave(outfile, plot = coverage, dpi=resolution, dev='png', height=height_plot1, width=width_plot1, units="cm")
df = data.frame(snr = sapply(unique(df_inference$snr), toString),
MLE = 100*df_inference$sel.power[((2*snr.len)+1):(3*snr.len)],
Lee = 100*df_inference$sel.power[1:snr.len])
if(target== "selected"){
data.m <- melt(df, id.vars='snr')
sel_power = ggplot(data.m, aes(snr, value)) +
geom_bar(aes(fill = variable), width = 0.4, position = position_dodge(width=0.5), stat="identity") +
labs(y="power: partial", x = "Signal regimes: snr") +
theme(legend.position="top", legend.title = element_blank())
sel_power = sel_power + scale_fill_manual(labels = c("MLE-based","Lee"), values=c("#008B8B", "#B22222"))} else{
df = cbind(df, Liu = 100*df_inference$sel.power[((snr.len)+1):(2*snr.len)])
df <- df[,c("snr", "MLE", "Liu", "Lee")]
data.m <- melt(df, id.vars='snr')
sel_power = ggplot(data.m, aes(snr, value)) +
geom_bar(aes(fill = variable), width = 0.4, position = position_dodge(width=0.5), stat="identity") +
labs(y="power: full", x = "Signal regimes: snr") +
theme(legend.position="top", legend.title = element_blank())
sel_power = sel_power + scale_fill_manual(labels = c("MLE-based","Liu","Lee"), values=c("#008B8B", "#104E8B", "#B22222"))}
outfile = paste(outpath, 'selective_power.png', sep="")
ggsave(outfile, plot = sel_power, dpi=resolution, dev='png', height=height_plot1, width=width_plot1, units="cm")
if(target== "selected"){
test_data <-data.frame(MLE = filter(df_inference, method == "MLE")$length,
Lee = filter(df_inference, method == "Lee")$length,
Naive = filter(df_inference, method == "Naive")$length,
date = 1:length(unique(df_inference$snr)))
lengths = test_data %>%
gather(key, value, MLE, Lee, Naive) %>%
ggplot(aes(x=date, y=value, colour=key, shape=key, linetype=key)) +
geom_point(size=3) +
geom_line(aes(linetype=key), size=1) +
ylim(0.,max(test_data$MLE, test_data$Lee, test_data$Naive) + 0.2)+
labs(y="lengths:partial", x = "Signal regimes: snr") +
scale_x_continuous(breaks=1:length(unique(df_inference$snr)), label = sapply(unique(df_inference$snr), toString))+
theme(legend.position="top", legend.title = element_blank())
indices = sort(c("MLE", "Lee", "Naive"), index.return= TRUE)$ix
names = c("MLE-based", "Lee", "Naive")
lengths = lengths + scale_color_manual(labels = names[indices], values=c("#008B8B","#B22222", "#FF6347")[indices]) +
scale_shape_manual(labels = names[indices], values=c(15, 17, 16)[indices]) +
scale_linetype_manual(labels = names[indices], values = c(1,1,2)[indices])} else{
test_data <-data.frame(MLE = filter(df_inference, method == "MLE")$length,
Lee = filter(df_inference, method == "Lee")$length,
Naive = filter(df_inference, method == "Naive")$length,
Liu = filter(df_inference, method == "Liu")$length,
date = 1:length(unique(df_inference$snr)))
lengths= test_data %>%
gather(key, value, MLE, Lee, Naive, Liu) %>%
ggplot(aes(x=date, y=value, colour=key, shape=key, linetype=key)) +
geom_point(size=3) +
geom_line(aes(linetype=key), size=1) +
ylim(0.,max(test_data$MLE, test_data$Lee, test_data$Naive, test_data$Liu) + 0.2)+
labs(y="lengths: full", x = "Signal regimes: snr") +
scale_x_continuous(breaks=1:length(unique(df_inference$snr)), label = sapply(unique(df_inference$snr), toString))+
theme(legend.position="top", legend.title = element_blank())
indices = sort(c("MLE", "Liu", "Lee", "Naive"), index.return= TRUE)$ix
names = c("MLE-based", "Lee", "Naive", "Liu")
lengths = lengths + scale_color_manual(labels = names[indices], values=c("#008B8B","#B22222", "#FF6347", "#104E8B")[indices]) +
scale_shape_manual(labels = names[indices], values=c(15, 17, 16, 15)[indices]) +
scale_linetype_manual(labels = names[indices], values = c(1,1,2,1)[indices])}
prop = filter(df_inference, method == "Lee")$prop.infty
df = data.frame(snr = sapply(unique(df_inference$snr), toString),
infinite = 100*prop)
data.prop <- melt(df, id.vars='snr')
pL = ggplot(data.prop, aes(snr, value)) +
geom_bar(aes(fill = variable), width = 0.4, position = position_dodge(width=0.5), stat="identity") +
labs(y="infinite intervals (%)", x = "Signal regimes: snr") +
theme(legend.position="top",
legend.title = element_blank())
pL = pL + scale_fill_manual(labels = c("Lee"), values=c("#B22222"))
prow <- plot_grid( pL + theme(legend.position="none"),
lengths + theme(legend.position="none"),
align = 'vh',
hjust = -1,
ncol = 1)
legend <- get_legend(lengths+ theme(legend.direction = "horizontal",legend.justification="center" ,legend.box.just = "bottom"))
p <- plot_grid(prow, ncol=1, legend, rel_heights = c(2., .2))
outfile = paste(outpath, 'length.png', sep="")
ggsave(outfile, plot = p, dpi=resolution, dev='png', height=height_plot2, width=width_plot2, units="cm")}
""")
robjects.pandas2ri.activate()
r_df_inference = robjects.conversion.py2ri(df_inference)
R_plot = robjects.globalenv['plot_coverage_lengths']
R_plot(r_df_inference)
def output_file(n=500, p=100, rho=0.35, s=5, beta_type=1, snr_values=np.array([0.10, 0.15, 0.20, 0.25, 0.30,
0.35, 0.42, 0.71, 1.22, 2.07]),
target="selected", tuning_nonrand="lambda.1se", tuning_rand="lambda.1se",
randomizing_scale = np.sqrt(0.50), ndraw = 50, outpath = None, plot=False):
df_selective_inference = pd.DataFrame()
df_risk = pd.DataFrame()
if n > p:
full_dispersion = True
else:
full_dispersion = False
snr_list = []
snr_list_0 = []
for snr in snr_values:
snr_list.append(snr*np.ones(4))
snr_list_0.append(snr*np.ones(2))
output_overall = np.zeros(55)
if target == "selected":
for i in range(ndraw):
output_overall += np.squeeze(comparison_cvmetrics_selected(n=n, p=p, nval=n, rho=rho, s=s, beta_type=beta_type, snr=snr,
randomizer_scale=randomizing_scale, full_dispersion=full_dispersion,
tuning_nonrand =tuning_nonrand, tuning_rand=tuning_rand))
elif target == "full":
for i in range(ndraw):
output_overall += np.squeeze(comparison_cvmetrics_full(n=n, p=p, nval=n, rho=rho, s=s, beta_type=beta_type, snr=snr,
randomizer_scale=randomizing_scale, full_dispersion=full_dispersion,
tuning_nonrand =tuning_nonrand, tuning_rand=tuning_rand))
elif target == "debiased":
for i in range(ndraw):
output_overall += np.squeeze(comparison_cvmetrics_debiased(n=n, p=p, nval=n, rho=rho, s=s, beta_type=beta_type, snr=snr,
randomizer_scale=randomizing_scale, full_dispersion=full_dispersion,
tuning_nonrand =tuning_nonrand, tuning_rand=tuning_rand))
nLee = output_overall[52]
nLiu = output_overall[53]
nMLE = output_overall[54]
relative_risk = (output_overall[0:6] / float(ndraw)).reshape((1, 6))
partial_risk = np.hstack(((output_overall[46:50] / float(ndraw-nMLE)).reshape((1, 4)),
(output_overall[50:52] / float(ndraw - nLee)).reshape((1, 2))))
nonrandomized_naive_inf = np.hstack(((output_overall[6:12] / float(ndraw - nLee)).reshape((1, 6)),
(output_overall[12:16] / float(ndraw)).reshape((1, 4))))
nonrandomized_Lee_inf = np.hstack(((output_overall[16:22] / float(ndraw - nLee)).reshape((1, 6)),
(output_overall[22:26] / float(ndraw)).reshape((1, 4))))
nonrandomized_Liu_inf = np.hstack(((output_overall[26:32] / float(ndraw - nLiu)).reshape((1, 6)),
(output_overall[32:36] / float(ndraw)).reshape((1, 4))))
randomized_MLE_inf = np.hstack(((output_overall[36:42] / float(ndraw - nMLE)).reshape((1, 6)),
(output_overall[42:46] / float(ndraw)).reshape((1, 4))))
if target=="selected":
nonrandomized_Liu_inf[nonrandomized_Liu_inf==0] = 'NaN'
if target == "debiased":
nonrandomized_Liu_inf[nonrandomized_Liu_inf == 0] = 'NaN'
nonrandomized_Lee_inf[nonrandomized_Lee_inf == 0] = 'NaN'
df_naive = pd.DataFrame(data=nonrandomized_naive_inf,columns=['coverage', 'length', 'prop-infty', 'tot-active', 'bias', 'sel-power',
'power', 'power-BH', 'fdr-BH','tot-discoveries'])
df_naive['method'] = "Naive"
df_Lee = pd.DataFrame(data=nonrandomized_Lee_inf, columns=['coverage', 'length', 'prop-infty','tot-active','bias', 'sel-power',
'power', 'power-BH', 'fdr-BH','tot-discoveries'])
df_Lee['method'] = "Lee"
df_Liu = pd.DataFrame(data=nonrandomized_Liu_inf,columns=['coverage', 'length', 'prop-infty', 'tot-active','bias', 'sel-power',
'power', 'power-BH', 'fdr-BH', 'tot-discoveries'])
df_Liu['method'] = "Liu"
df_MLE = pd.DataFrame(data=randomized_MLE_inf, columns=['coverage', 'length', 'prop-infty', 'tot-active','bias', 'sel-power',
'power', 'power-BH', 'fdr-BH', 'tot-discoveries'])
df_MLE['method'] = "MLE"
df_risk_metrics = | pd.DataFrame(data=relative_risk, columns=['sel-MLE', 'ind-est', 'rand-LASSO','rel-rand-LASSO', 'rel-LASSO', 'LASSO']) | pandas.DataFrame |
from lib.timecards import Timecards
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import re
def remove_gsa_gov(email):
    return re.sub(r'@gsa\.gov', '', email).lower()
def remove_non_billable_projects(project_name):
if ("TTS Acq" in project_name) or ("PIF" in project_name) or ("Federalist" in project_name):
return True
else:
return False
def get_roster(timecards):
employees = pd.read_csv('data/roster.csv')
employees["employee"] = employees["Email (UID)"].apply(remove_gsa_gov)
employees["inTimeCards"] = employees["employee"].apply(lambda x: x in timecards.user.values)
# roster = employees[employees["inTimeCards"] == True]
return employees
def get_employee_unit(employee, roster):
result = roster.loc[roster["employee"] == employee, "Team (Chapter/BU)"]
if len(result) > 0:
return result.values[0]
else:
return "N/A"
def check_understaffed(user, roster):
row = roster.loc[roster["employee"] == user.user.values[0]]
if len(row) > 0:
unit = row["Team (Chapter/BU)"].values
billable_status = row["Updated Billable Category"].values
billed_hours = user[user["billable"]].hours_spent.sum()
ooo = user[user["project_name"].str.contains("Out of Office")].hours_spent.sum()
if len(unit) > 0 and len(billable_status) > 0:
if billable_status[0] == "Primary" and billed_hours < 28:
return pd.Series({"user":user.user.values[0], "category": "Primary", "unit":unit[0], "hours": billed_hours, "delta": 32 - billed_hours, "ooo": ooo, "understaffed": ((ooo+billed_hours)/40 < .7)})
elif len(row) > 0 and billable_status[0] == "Partial" and billed_hours < 12:
return | pd.Series({"user":user.user.values[0], "category": "Partial", "unit":unit[0], "hours": billed_hours, "delta": 12 - billed_hours, "ooo": ooo, "understaffed": ((ooo+billed_hours)/40 < .2)}) | pandas.Series |
import sys
from pathlib import Path
from collections import namedtuple
import datetime as dt
import numpy as np
import re
import os
import multiprocessing
from functools import partial
from itertools import repeat
import pandas as pd
from .EC_DataLoader.CreateCV import create_CVs
from .HER.HER_analyze_scans import HER_scan
from file_py_helper.file_functions import FileOperations
import logging
logger = logging.getLogger(__name__)
Meta = namedtuple("Meta", "PAR_file data ovv")
def PF_fit_starmap_with_kwargs(pool, fn, args_iter, kwargs_iter):
args_for_starmap = zip(repeat(fn), args_iter, kwargs_iter)
return pool.starmap(PF_fit_apply_args_and_kwargs, args_for_starmap)
def PF_fit_apply_args_and_kwargs(fn, args, kwargs):
return fn(args, **kwargs)
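# Illustrative usage sketch (added for clarity, not part of the original module):
# how the two starmap helpers above fit together. The worker `_demo_fit` and its
# arguments are hypothetical; note that PF_fit_apply_args_and_kwargs passes
# `args` as a single positional argument, not unpacked.
def _demo_PF_fit_helpers():
    def _demo_fit(par_file, n_max=10):
        return (par_file, n_max)

    args_iter = ["scan_01.par", "scan_02.par"]
    kwargs_iter = [{"n_max": 5}, {"n_max": 20}]
    # emulate what pool.starmap would do, element by element
    return [PF_fit_apply_args_and_kwargs(_demo_fit, a, kw)
            for a, kw in zip(args_iter, kwargs_iter)]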
def HER_prepare_ovv_dest(ovv_exp_grp, dest_dir_name="HER_dest_dir", **HER_kwargs):
# gr_ovv = ovv_exp_grp.get_group('HER')
gr_ovv = | pd.concat([gr for n, gr in ovv_exp_grp if "HER" in n]) | pandas.concat |
import os
import io
import sys
import gzip
import re
import sqlite3
import dbm
from glob import glob
from datetime import datetime, date, timedelta
from pprint import pprint
from math import nan, inf, pi as π, e
import math
from random import seed, choice, randint, sample
from contextlib import contextmanager
from collections import namedtuple
from collections import Counter
from itertools import islice
from textwrap import fill
from dataclasses import dataclass, astuple, asdict, fields
import json
from jsonschema import validate, ValidationError
import simplejson
import requests
import numpy as np
import pandas as pd
from cycler import cycler
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from matplotlib import cm
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
import seaborn as sns
import dask
import psycopg2
from sqlalchemy import create_engine
from sklearn.datasets import load_digits, load_breast_cancer
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.manifold import TSNE
from sklearn.feature_selection import RFECV, RFE
from pymongo import MongoClient
from IPython.display import Image as Show
import nltk
# Might use tokenization/stopwords
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
# Do not display warnings
import warnings
warnings.simplefilter('ignore')
# Only show 8 rows from large DataFrame
pd.options.display.max_rows = 8
pd.options.display.min_rows = 8
# A bit of setup for monochrome book; not needed for most work
monochrome = (cycler('color', ['k', '0.5']) *
cycler('linestyle', ['-', '-.', ':']))
plt.rcParams['axes.prop_cycle'] = monochrome
plt.rcParams['figure.dpi'] = 100
plt.rcParams['savefig.dpi'] = 600
@contextmanager
def show_more_rows(new=sys.maxsize):
old_max = pd.options.display.max_rows
old_min = pd.options.display.min_rows
try:
pd.set_option("display.max_rows", new)
pd.set_option('display.min_rows', new)
yield old_max
finally:
pd.options.display.max_rows = old_max
pd.options.display.min_rows = old_min
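# Illustrative usage of show_more_rows (added for clarity, not part of the
# original setup code): temporarily lift the 8-row display cap set above.
def _demo_show_more_rows():
    df = pd.DataFrame({'x': range(100)})
    with show_more_rows(50):
        print(df)   # up to 50 rows are printed inside the block
    print(df)       # back to the 8-row default afterwards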
# PostgreSQL configuration
def connect_local():
user = 'cleaning'
pwd = '<PASSWORD>'
host = 'localhost'
port = '5432'
db = 'dirty'
con = psycopg2.connect(database=db, host=host, user=user, password=pwd)
engine = create_engine(f'postgresql://{user}:{pwd}@{host}:{port}/{db}')
return con, engine
# MongoDB conection
def connect_mongo():
client = MongoClient(port=27017)
return client
# Utility function
def random_phone(reserved=True):
digits = '0123456789'
area = '000'
while area.startswith('0'):
area = ''.join(sample(digits, 3))
# DO NOT REMOVE prefix code
# it is not used now, but random seed assumes
# the exact same sequence of calls
prefix = '000'
while prefix.startswith('0'):
prefix = ''.join(sample(digits, 3))
suffix = ''.join(sample(digits, 4))
#-------------------------------------
if reserved:
prefix = "555"
return f"+1 {area} {prefix} {suffix}"
# Make the "business" database
def make_mongo_biz(client=connect_mongo()):
# Remove any existing DB and recreate it
client.drop_database('business')
db = client.business
# Create sample data
words = [
'Kitchen', 'Tasty', 'Big', 'City', 'Fish',
'Delight', 'Goat', 'Salty', 'Sweet']
title = ['Inc.', 'Restaurant', 'Take-Out']
cuisine = [
'Pizza', 'Sandwich', 'Italian', 'Mexican',
'American', 'Sushi', 'Vegetarian']
prices = ['cheap', 'reasonable', 'expensive']
seed(2)
info = {}
for n in range(50):
# Make a random business
name = (f"{choice(words)} {choice(words)} "
f"{choice(title)}")
info[name] = random_phone()
biz = {
'name': name,
'cuisine': choice(cuisine),
'phone': info[name]
}
db.info.insert_one(biz)
for n in range(5000):
# Make a random review
name = choice(list(info))
price = choice(prices)
review = {'name': name, 'price': price}
# Usually have a rating
if (n+5) % 100:
review['rating'] = randint(1, 10)
# Occasionally denormalize
if not (n+100) % 137:
review['phone'] = info[name]
# But sometimes inconsistently
if not n % 5:
review['phone'] = random_phone()
# Insert business into MongoDB
result = db.reviews.insert_one(review)
print('Created 50 restaurants; 5000 reviews')
def make_dbm_biz(client=connect_mongo()):
"We assume that make_mongo_biz() has run"
biz = client.business
ratings = dict()
with dbm.open('data/keyval.db', 'n') as db:
db['DESCRIPTION'] = 'Restaurant information'
now = datetime.isoformat(datetime.now())
db['LAST-UPDATE'] = now
# Add reviews
q = biz.reviews.find()
for n, review in enumerate(q):
key = f"{review['name']}::ratings"
# Occasionally use unusual delimiter
if not (n+50) % 100:
key = key.replace("::", "//")
            # First rating or append to list?
rating = str(review.get('rating', ''))
if key in ratings:
old = ratings[key]
val = f"{old};{rating}"
else:
val = rating
db[key] = ratings[key] = val
# Add business info
for n, info in enumerate(biz.info.find()):
key1 = f"{info['name']}::info::phone"
key2 = f"{info['name']}::info::cuisine"
db[key1] = info['phone']
db[key2] = info['cuisine']
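# Illustrative sketch (added for clarity, not part of the original setup code):
# reading a few entries back out of the DBM file written by make_dbm_biz().
def _demo_read_dbm(path='data/keyval.db'):
    with dbm.open(path, 'r') as db:
        description = db['DESCRIPTION'].decode()
        sample = {key.decode(): db[key].decode()
                  for key in list(db.keys())[:5]}
    return description, sample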
def pprint_json(jstr):
from json import dumps, loads
print(dumps(loads(jstr),indent=2))
def print_err(err):
print(err.__class__.__name__)
print(fill(str(err)))
def not_valid(instance, schema):
try:
return validate(instance, schema)
except ValidationError as err:
return str(err)
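# Illustrative sketch (added for clarity, not part of the original setup code):
# how not_valid() is typically called. The schema and instances are made up.
def _demo_not_valid():
    schema = {'type': 'object',
              'properties': {'price': {'type': 'number'}},
              'required': ['price']}
    ok = not_valid({'price': 3.5}, schema)        # None when the instance validates
    bad = not_valid({'price': 'cheap'}, schema)   # error message string otherwise
    return ok, bad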
def make_missing_pg():
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS missing")
cur.execute("CREATE TABLE missing (a REAL, b CHAR(10))")
cur.execute("INSERT INTO missing(a, b) VALUES ('NaN', 'Not number')")
cur.execute("INSERT INTO missing(a, b) VALUES (1.23, 'A number')")
cur.execute("INSERT INTO missing(a, b) VALUES (NULL, 'A null')")
cur.execute("INSERT INTO missing(a, b) VALUES (3.45, 'Santiago')")
cur.execute("INSERT INTO missing(a, b) VALUES (6.78, '')")
cur.execute("INSERT INTO missing(a, b) VALUES (9.01, NULL)")
con.commit()
cur.execute("SELECT * FROM missing")
return cur.fetchall()
def make_dask_data():
df = dask.datasets.timeseries()
if not os.path.exists('data/dask'):
os.mkdir('data/dask')
df.to_csv('data/dask/*.csv',
name_function=lambda i: (
str(date(2000, 1, 1) +
i * timedelta(days=1)))
)
def dask_to_postgres():
"Put some data into postgres. Assumes Dask data was generated"
out = io.StringIO()
df = pd.read_csv('data/dask/2000-01-02.csv', parse_dates=['timestamp'])
df = df.loc[3456:5678]
df.to_sql('dask_sample', engine, if_exists='replace')
sql = """
ALTER TABLE dask_sample
ALTER COLUMN id TYPE smallint,
ALTER COLUMN name TYPE char(10),
ALTER COLUMN x TYPE decimal(6, 3),
ALTER COLUMN y TYPE real,
ALTER COLUMN index TYPE integer;
"""
cur = con.cursor()
cur.execute(sql)
cur.execute('COMMIT;')
describe = """
SELECT column_name, data_type, numeric_precision, character_maximum_length
FROM information_schema.columns
WHERE table_name='dask_sample';"""
cur.execute(describe)
for tup in cur:
print(f"{tup[0]}: {tup[1]} ({tup[2] or tup[3]})", file=out)
cur.execute("SELECT count(*) FROM dask_sample")
print("Rows:", cur.fetchone()[0], file=out)
return out.getvalue()
def make_bad_amtrak():
"Create deliberately truncated data"
out = io.StringIO()
df = pd.read_csv('data/AMTRAK-Stations-Database_2012.csv')
df = df[['Code', 'StationName', 'City', 'State']]
df['Visitors'] = np.random.randint(0, 35_000, 973)
df['StationName'] = df.StationName.str.split(', ', expand=True)[0]
df['StationName'] = df.StationName.str[:20]
df['Visitors'] = np.clip(df.Visitors, 0, 2**15-1)
df.to_sql('bad_amtrak', engine, if_exists='replace', index=False)
sql = """
ALTER TABLE bad_amtrak
ALTER COLUMN "StationName" TYPE char(20),
ALTER COLUMN "Visitors" TYPE smallint;
"""
cur = con.cursor()
cur.execute(sql)
cur.execute('COMMIT;')
describe = """
SELECT column_name, data_type, numeric_precision, character_maximum_length
FROM information_schema.columns
WHERE table_name='bad_amtrak';"""
cur.execute(describe)
for tup in cur:
print(f"{tup[0]}: {tup[1]} ({tup[2] or tup[3]})", file=out)
cur.execute("SELECT count(*) FROM bad_amtrak")
print("Rows:", cur.fetchone()[0], file=out)
return out.getvalue()
def make_h5_hierarchy():
import h5py
np.random.seed(1) # Make the notebook generate same random data
f = h5py.File('data/hierarchy.h5', 'w')
f.create_dataset('/deeply/nested/group/my_data',
(10, 10, 10, 10), dtype='i')
f.create_dataset('deeply/path/elsewhere/other', (20,), dtype='i')
f.create_dataset('deeply/path/that_data', (5, 5), dtype='f')
dset = f['/deeply/nested/group/my_data']
np.random.seed(seed=1)
dset[...] = np.random.randint(-99, 99, (10, 10, 10, 10))
dset.attrs['author'] = '<NAME>'
dset.attrs['citation'] = 'Cleaning Data Book'
dset.attrs['shape_type'] = '4-D integer array'
f.close()
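# Illustrative sketch (added for clarity, not part of the original setup code):
# reading back the nested dataset and its attributes written by make_h5_hierarchy().
def _demo_read_h5(path='data/hierarchy.h5'):
    import h5py
    with h5py.File(path, 'r') as f:
        dset = f['/deeply/nested/group/my_data']
        shape = dset.shape                 # (10, 10, 10, 10)
        author = dset.attrs['author']
    return shape, author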
def show_boxplots(df, cols, whis=1.5):
# Create as many horizontal plots as we have columns
fig, axes = plt.subplots(len(cols), 1, figsize=(10, 2*len(cols)))
# For each one, plot the non-null data inside it
for n, col in enumerate(cols):
data = df[col][df[col].notnull()]
axes[n].set_title(f'{col} Distribution')
# Extend whiskers to specified IQR multiplier
axes[n].boxplot(data, whis=whis, vert=False, sym='x')
axes[n].set_yticks([])
# Fix spacing of subplots at the end
fig.tight_layout()
plt.savefig(f"img/boxplot-{'_'.join(cols)}.png")
def make_corrupt_digits():
from sklearn.datasets import load_digits
import random as r
r.seed(1)
digits = load_digits().images[:50]
for digit in digits:
for _ in range(3):
x, y = r.randint(0, 7), r.randint(0, 7)
digit[x, y] = -1
np.save('data/digits.npy', digits.astype(np.int8))
# Load the digit data into namespace
digits = np.load('data/digits.npy')
def show_digits(digits=digits, x=3, y=3, title="Digits"):
"Display of 'corrupted numerals'"
if digits.min() >= 0:
newcm = cm.get_cmap('Greys', 17)
else:
gray = cm.get_cmap('Greys', 18)
newcolors = gray(np.linspace(0, 1, 18))
newcolors[:1, :] = np.array([1.0, 0.9, 0.9, 1])
newcm = ListedColormap(newcolors)
fig, axes = plt.subplots(x, y, figsize=(x*2.5, y*2.5),
subplot_kw={'xticks':(), 'yticks': ()})
for ax, img in zip(axes.ravel(), digits):
ax.imshow(img, cmap=newcm)
for i in range(8):
for j in range(8):
if img[i, j] == -1:
s = "╳"
c = "k"
else:
s = str(img[i, j])
c = "k" if img[i, j] < 8 else "w"
text = ax.text(j, i, s, color=c,
ha="center", va="center")
fig.suptitle(title, y=0)
fig.tight_layout()
plt.savefig(f'img/{title}.png')
kryptonite = pd.read_fwf('data/excited-kryptonite.fwf')
def plot_kryptonite(df=kryptonite,
independent='Wavelength_nm',
logx=True,
imputed=False):
fig, ax = plt.subplots(figsize=(10,3))
(df[df.Kryptonite_type == "Green"]
.plot(kind='scatter', color="green", marker="o", s=20,
x=independent, y='candela_per_m2',
logx=logx, ax=ax, label="Green"))
(df[df.Kryptonite_type == "Gold"]
.plot(kind='scatter', color="goldenrod", marker="s", s=30,
x=independent, y='candela_per_m2',
logx=logx, ax=ax, label="Gold"))
(df[df.Kryptonite_type == "Red"]
.plot(kind='scatter', color="darkred", marker="x", s=25,
x=independent, y='candela_per_m2',
logx=logx, ax=ax, label="Red"))
ax.legend()
title = f"Luminance response of kryptonite types by {independent}"
if imputed:
title = f"{title} (imputed)"
ax.set_title(title)
plt.savefig(f"img/{title}.png")
DTI = pd.DatetimeIndex
date_series = pd.Series(
[-10, 1, 2, np.nan, 4],
index=DTI(['2001-01-01',
'2001-01-05',
'2001-01-10',
'2001-02-01',
'2001-02-05']))
def plot_filled_trend(s=None):
n = len(s)
line = pd.Series(np.linspace(s[0], s[-1], n),
index=s.index)
filled = s.fillna(pd.Series(line))
plt.plot(range(n), filled.values, 'o', range(n), line, ":")
plt.grid(axis='y', color='lightgray', linewidth=0.5)
plt.xticks(range(n),
labels=['Actual', 'Actual',
'Actual', 'Imputed (Feb 1)',
'Actual'])
plt.plot([3], [3.69], 'x',
label="Time interpolated value")
plt.legend()
title = "Global imputation from linear trend"
plt.title(title)
plt.savefig(f"img/{title}.png")
philly = 'data/philly_houses.json'
def make_philly_missing(fname=philly):
np.random.seed(1)
out = fname.replace('houses', 'missing')
rows = json.load(open(fname))['rows']
blank = np.random.randint(0, len(rows), 1000)
for num in blank:
rows[num]['market_value'] = np.nan
json.dump(rows, open(out, 'w'))
return rows
def make_cars(fname='data/cars/car.data'):
cars = pd.read_csv(fname, header=None,
names=["price_buy",
"price_maintain",
"doors",
"passengers",
"trunk",
"safety",
"rating"])
price = {'vhigh': 3, 'high': 2,
'med': 1, 'low': 0}
cars['price_buy'] = cars.price_buy.map(price)
cars['price_maintain'] = cars.price_maintain.map(price)
cars['doors'] = cars.doors.map(
{'2': 2, '3': 3,
'4': 4, '5more': 5})
cars['passengers'] = cars.passengers.map(
{'2': 2, '4': 4,
'more': 6})
cars['trunk'] = cars.trunk.map(
{'small': 0, 'med': 1,
'big': 2})
cars['safety'] = cars.safety.map(
{'low': 0, 'med': 1,
'high': 2})
cars['rating'] = cars.rating.map(
{'unacc': "Unacceptable",
'acc': "Acceptable",
'good': "Good",
'vgood': "Very Good"})
cars = cars.sample(frac=1, random_state=1)
cars.to_csv('data/cars.csv', index=False)
def make_hair_lengths(df):
assert len(df) == 25_000
np.random.seed(1)
s = np.random.beta(a=1.1, b=5, size=25000) * 147
negate = np.random.randint(0, 25_000, 100)
s[negate] *= -1
neg_one = np.random.randint(0, 25_000, 20)
s[neg_one] = -1
zero = np.random.randint(0, 25_000, 500)
s[zero] = 0
s = np.round(s, 1)
df['Hair_Length'] = s
plt.hist(s, 50, density=True)
return df
def add_typos(df):
from random import random, randrange
fnames = list(df.Name)
for n, name in enumerate(fnames):
r = random()
letters = list(name)
pos = randrange(0, len(name))
if r < 0.002: # Delete a letter
print("Deleting letter from", name)
del letters[pos]
fnames[n] = ''.join(letters)
elif r < 0.004: # Add a letter ('e')
print("Adding letter to", name)
letters.insert(pos, 'e')
fnames[n] = ''.join(letters)
elif r < 0.006: # Swap adjacent letters
print("Swapping letters in", name)
letters[pos], letters[pos-1] = letters[pos-1], letters[pos]
fnames[n] = ''.join(letters)
df['Name'] = fnames
# Make a synthetic collection of names/ages/other
# based on the actual SSA name frequency
def make_ssa_synthetic(fname='data/Baby-Names-SSA.csv'):
# Repeatable random
import random
random.seed(1)
# Date for age calculation
now = 2020
# Population adjustment by year in 20Ms
population = np.linspace(5, 16, 100)
years = np.linspace(1919, 2018, 100, dtype=int)
year_to_pop = dict(zip(years, population))
# Rank to count
rank_to_freq = {'1': 1.0, '2': 0.9, '3': 0.8,
'4': 0.7, '5': 0.6}
# Read the rank popularity of names by year
df = pd.read_csv('data/Baby-Names-SSA.csv')
df = df.set_index('Year').sort_index()
unstack = df.unstack()
# Random features
colors = ('Red', 'Green', 'Blue',
'Yellow', 'Purple', 'Black')
flowers = ('Daisy', 'Orchid', 'Rose',
'Violet', 'Lily')
# Python list-of-lists to construct new data
rows = []
for (rank_gender, year), name in unstack.iteritems():
rank, gender = rank_gender
age = now - year
count = int(year_to_pop[year] *
rank_to_freq[rank])
for _ in range(count):
color = random.choice(colors)
flower = random.choice(flowers)
rows.append(
(age, gender, name, color, flower))
df = | pd.DataFrame(rows) | pandas.DataFrame |
# This script is part of pyroglancer (https://github.com/SridharJagannathan/pyroglancer).
# Copyright (C) 2020 <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""Module contains functions to handle synapse data."""
import json
import navis
import neuroglancer
import os
import pandas as pd
import pymaid
import struct
def commit_info(synapseinfo, path, synapsetype):
"""Commit the info file created for the synapses based on precomputed format.
Parameters
----------
synapseinfo : dict
info in json format for the synapses
path: str
local path of the precomputed hosted layer.
synapsetype : str
pre or postsynapses
"""
synapsefilepath = path + '/' + synapsetype
if not os.path.exists(synapsefilepath):
os.makedirs(synapsefilepath)
print('creating:', synapsefilepath)
infofile = os.path.join(synapsefilepath, 'info')
with open(infofile, 'w') as f:
json.dump(synapseinfo, f)
def create_synapseinfo(dimensions, path):
"""Create info file for the synapse based precomputed format.
Parameters
----------
path: str
local path of the precomputed hosted layer.
dimensions: neuroglancer.CoordinateSpace
object of neuroglancer coordinate space class.
"""
synapseinfo = {
'@type': 'neuroglancer_annotations_v1',
"annotation_type": "POINT",
"by_id": {
"key": "by_id"
},
"dimensions": {
"x": [dimensions['x'].scale, dimensions['x'].unit],
"y": [dimensions['y'].scale, dimensions['y'].unit],
"z": [dimensions['z'].scale, dimensions['z'].unit]
},
"lower_bound": [0, 0, 0],
"properties": [],
"relationships": [],
"spatial": [
{
"chunk_size": [34422, 37820, 41362],
"grid_shape": [1, 1, 1],
"key": "spatial0",
"limit": 10000
}
],
"upper_bound": [34422, 37820, 41362]
}
synapseinfo["relationships"] = [{"id": "presynapses_cell",
"key": "presynapses_cell"}]
print('synapses info path:', path)
commit_info(synapseinfo, path, synapsetype='presynapses')
synapseinfo["relationships"] = [{"id": "postsynapses_cell",
"key": "postsynapses_cell"}]
commit_info(synapseinfo, path, synapsetype='postsynapses')
return path
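# Illustrative usage sketch (added for clarity, not part of the original module):
# building a CoordinateSpace and writing the pre-/postsynapse info files under a
# local path. The path and nanometre scales are assumptions for the example.
def _demo_create_synapseinfo(path='/tmp/precomputed'):
    dimensions = neuroglancer.CoordinateSpace(
        names=['x', 'y', 'z'], units='nm', scales=[1000, 1000, 1000])
    return create_synapseinfo(dimensions, path)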
def put_synapsefile(path, synapsetype, synapses, skeletonid):
"""Put synapse in the local dataserver.
Parameters
----------
path: str
local path of the precomputed hosted layer.
synapsetype : str
pre or postsynapses
synapses : dataframe
contains 'x', 'y', 'z' columns
skeletonid : int
skeleton id to be associated with the corresponding synapse
pointsname : str
name for the points (not yet implemented)
"""
synapsefilepath = path + '/' + synapsetype + '/' + synapsetype + '_cell/'
if not os.path.exists(synapsefilepath):
os.makedirs(synapsefilepath)
# print('creating:', synapsefilepath)
synapsefile = os.path.join(synapsefilepath, str(skeletonid))
print('making:', synapsefile)
synapselocs = synapses[['x', 'y', 'z']].values/1000
# implementation based on logic suggested by https://github.com/google/neuroglancer/issues/227
with open(synapsefile, 'wb') as outputbytefile:
total_synapses = len(synapselocs) # coordinates is a list of tuples (x,y,z)
buffer = struct.pack('<Q', total_synapses)
for (x, y, z) in synapselocs:
synapsepoint = struct.pack('<3f', x, y, z)
buffer += synapsepoint
# write the ids of the individual points at the very end..
synapseid_buffer = struct.pack('<%sQ' % len(synapselocs), *range(len(synapselocs)))
buffer += synapseid_buffer
outputbytefile.write(buffer)
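# Illustrative sketch (added for clarity, not part of the original module): reading
# back one of the files written above, to show the byte layout used by
# put_synapsefile (little-endian uint64 count, then count * 3 float32 coordinates,
# then count uint64 point ids).
def _demo_read_synapsefile(synapsefile):
    with open(synapsefile, 'rb') as f:
        buffer = f.read()
    total, = struct.unpack_from('<Q', buffer, 0)
    points = struct.unpack_from('<%df' % (3 * total), buffer, 8)
    ids = struct.unpack_from('<%dQ' % total, buffer, 8 + 12 * total)
    return total, points, ids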
def upload_synapses(x, path):
"""Upload synpases from a neuron or neuronlist.
Parameters
----------
x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList
neuron or neuronlist of different formats
path: str
local path of the precomputed hosted layer.
"""
if isinstance(x, pymaid.core.CatmaidNeuron):
neuronlist = pymaid.core.CatmaidNeuronList(x)
elif isinstance(x, navis.core.TreeNeuron):
neuronlist = navis.core.NeuronList(x)
elif (isinstance(x, pymaid.core.CatmaidNeuronList) or isinstance(x, navis.core.NeuronList)):
neuronlist = x
else:
raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"')
for neuronidx in range(len(neuronlist)):
neuronelement = neuronlist[neuronidx]
presynapses = neuronelement.presynapses
postsynapses = neuronelement.postsynapses
print('Adding neuron: ', neuronelement.id)
put_synapsefile(path, 'presynapses', presynapses, neuronelement.id)
put_synapsefile(path, 'postsynapses', postsynapses, neuronelement.id)
def annotate_synapses(ngviewer, dimensions, x):
"""Annotate postsynapses of a neuron/neuronlist. (defunct do not use..).
This function annotates synapses of a neuron/neuronlist
Parameters
----------
ngviewer : ng.viewer.Viewer
object of Neuroglancer viewer class.
dimensions: neuroglancer.CoordinateSpace
object of neuroglancer coordinate space class.
x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList
neuron or neuronlist of different formats
Returns
-------
status : bool
annotation status
"""
if isinstance(x, pymaid.core.CatmaidNeuron):
neuronlist = pymaid.core.CatmaidNeuronList(x)
elif isinstance(x, navis.core.TreeNeuron):
neuronlist = navis.core.NeuronList(x)
elif (isinstance(x, pymaid.core.CatmaidNeuronList) or isinstance(x, navis.core.NeuronList)):
neuronlist = x
else:
raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"')
skeldatasegidlist = []
for neuron in neuronlist:
skeldatasegidlist.append(neuron.id)
# postsynapses first..
with ngviewer.txn() as s:
s.layers.append(
name="post_synapses",
layer=neuroglancer.LocalAnnotationLayer(
dimensions=dimensions,
annotation_relationships=['post_synapses'],
linked_segmentation_layer={'post_synapses': 'skeletons'},
filter_by_segmentation=['post_synapses'],
ignore_null_segment_filter=False,
annotation_properties=[
neuroglancer.AnnotationPropertySpec(
id='color',
type='rgb',
default='blue',
)
],
shader='''
void main() {
setColor(prop_color());
setPointMarkerSize(5.0);
}
''',
))
with ngviewer.txn() as s:
for neuronidx in range(len(neuronlist)):
neuronelement = neuronlist[neuronidx]
postsynapses = neuronelement.postsynapses
postsynapses = postsynapses.reset_index()
for index, postsyn in postsynapses.iterrows():
s.layers['post_synapses'].annotations.append(
neuroglancer.PointAnnotation(
id=str(index),
point=[postsyn.x/1000, postsyn.y/1000, postsyn.z/1000],
segments=[[skeldatasegidlist[neuronidx]]],
props=['#0000ff'],
)
)
# presynapses next..
with ngviewer.txn() as s:
s.layers.append(
name="pre_synapses",
layer=neuroglancer.LocalAnnotationLayer(
dimensions=dimensions,
annotation_relationships=['pre_synapses'],
linked_segmentation_layer={'pre_synapses': 'skeletons'},
filter_by_segmentation=['pre_synapses'],
ignore_null_segment_filter=False,
annotation_properties=[
neuroglancer.AnnotationPropertySpec(
id='color',
type='rgb',
default='red',
)
],
shader='''
void main() {
setColor(prop_color());
setPointMarkerSize(5.0);
}
''',
))
with ngviewer.txn() as s:
for neuronidx in range(len(neuronlist)):
neuronelement = neuronlist[neuronidx]
presynapses = neuronelement.presynapses
presynapses = presynapses.reset_index()
for index, presyn in presynapses.iterrows():
s.layers['pre_synapses'].annotations.append(
neuroglancer.PointAnnotation(
id=str(index),
point=[presyn.x/1000, presyn.y/1000, presyn.z/1000],
segments=[[skeldatasegidlist[neuronidx]]],
props=['#ff0000'],
)
)
status = True
return status
def synapses2nodepoints(x, layer_scale):
"""Generate nodepoints (point A, point B) from synapses for given neuron(s).
Parameters
----------
x : CatmaidNeuron | CatmaidNeuronList or TreeNeuron | NeuronList
neuron or neuronlist of different formats
layer_scale : int | float
scaling from voxel to native space in 'x', 'y', 'z'
Returns
-------
synapsepointscollec_df : dataframe
contains synapse points in point A - point B format used in flywire annotations.
"""
if isinstance(x, pymaid.core.CatmaidNeuron):
x = pymaid.core.CatmaidNeuronList(x)
elif isinstance(x, navis.core.TreeNeuron):
x = navis.core.NeuronList(x)
elif (isinstance(x, pymaid.core.CatmaidNeuronList) or isinstance(x, navis.core.NeuronList)):
pass
else:
raise TypeError(f'Expected neuron or neuronlist, got "{type(x)}"')
synapsepointscollec_df = []
for neuronelement in x:
presynapses = neuronelement.presynapses
postsynapses = neuronelement.postsynapses
presyn_pt = presynapses[['x', 'y', 'z']].values
postsyn_pt = postsynapses[['x', 'y', 'z']].values
# scale the points incase it is in voxel coordinates..
presyn_pt = presyn_pt / layer_scale
postsyn_pt = postsyn_pt / layer_scale
pre_syn_df = pd.DataFrame(pd.Series(presyn_pt.tolist()), columns=['presyn_pt'])
post_syn_df = pd.DataFrame(pd.Series(postsyn_pt.tolist()), columns=['postsyn_pt'])
synapsepointscollec_df.append([neuronelement.id, pre_syn_df, post_syn_df])
synapsepointscollec_df = | pd.DataFrame(synapsepointscollec_df, columns=['id', 'pre_syn_df', 'post_syn_df']) | pandas.DataFrame |
"""
Download COVID-19 data from NY Times
"""
import numpy as np
import pandas as pd
from database import DataBase
from wrangler import (
US_MAP_TABLE,
STATE_MAP_TABLE
)
from sql import (
COUNTIES_VIEW,
DROP_COUNTIES_VIEW,
STATES_VIEW,
DROP_STATES_VIEW,
US_MAP_PIVOT_VIEW,
DROP_US_MAP_PIVOT_VIEW,
CREATE_OPTIONS_TABLE,
INSERT_USA_OPTION,
DROP_OPTIONS_TABLE
)
# inputs
US_COUNTIES_TABLE = 'us_counties'
US_STATES_TABLE = 'us_states'
# outputs
NYTIMES_COUNTIES_TABLE = 'nytimes_counties'
NYTIMES_STATES_TABLE = 'nytimes_states'
LEVELS_TABLE = 'levels'
DATES_TABLE = 'dates'
# levels to map cases and deaths
# LEVELS = [0, 1, 10, 100, 250, 500, 5000, 10000, np.inf]
LEVELS = [0, 1, 500, 1000, 2500, 5000, 10000, 20000, np.inf]
URL_COUNTIES = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
URL_STATES = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'
def download_nytimes():
"""Read NY Times data from github
"""
# read covid19 county by county data from url
data = pd.read_csv(URL_COUNTIES, dtype={'fips': 'str'})
_db = DataBase()
_db.add_table(US_COUNTIES_TABLE, data, index=False)
_db.close()
# read covid19 state by state data from url
data = | pd.read_csv(URL_STATES, dtype={'fips': 'str'}) | pandas.read_csv |
__author__ = "<NAME>"
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import lightgbm as lgb
df = | pd.read_csv("../data/localData/newCasesWithClass_Interpolated.csv") | pandas.read_csv |
from collections import defaultdict
from datetime import datetime
import os
import pandas as pd
from tqdm import tqdm
import time
import pickle
import argparse
import csv
import utils
import numpy as np
def read_demo(DATA_DIR, patient_list=None):
"""
df_demo['SEX'].value_counts():
F 33498
M 26692
UN 2
df_demo['RACE'].value_counts():
05 29526 05=White
OT 10726 OT=Other
UN 9403 UN=Unknown
03 9312 03=Black or African American
02 477 02=Asian
06 395 06=Multiple race
NI 152 NI=No information
01 99 01=American Indian or Alaska Native
07 74 07=Refuse to answer
04 28 04=Native Hawaiian or Other Pacific Islander
data top lines:
PATID,BIRTH_DATE,BIRTH_TIME,SEX,SEXUAL_ORIENTATION,GENDER_IDENTITY,HISPANIC,RACE,BIOBANK_FLAG,PAT_PREF_LANGUAGE_SPOKEN,RAW_SEX,RAW_SEXUAL_ORIENTATION,RAW_GENDER_IDENTITY,RAW_HISPANIC,RAW_RACE,RAW_PAT_PREF_LANGUAGE_SPOKEN,UPDATED,SOURCE,ZIP_CODE,LAST_ENCOUNTERID
11e75060a017514ea3fa0050569ea8fb,1928-09-14,NULL,F,NULL,NULL,N,03,N,NULL,F,NULL,NULL,B,B,NULL,2020-06-01 14:15:29,FLM,338534901,YvJLcr8CS1WhqgO/f/UW 2017-02-12
11e75060a49a95e6bb6a0050569ea8fb,1936-09-11,NULL,F,NULL,NULL,N,03,N,NULL,F,NULL,NULL,B,B,NULL,2020-06-01 14:15:29,FLM,349472302,YvJLcr0CS1SqrgS7f/gZ 2015-08-06
11e75060a5eb4d8cbc3a0050569ea8fb,1932-04-25,04:00,F,NI,NI,N,05,N,ENG,30000000|362,NULL,NULL,30000000|312507,30000000|309316,30000000|151,2019-08-29 15:47:23,AVH,327017752,NULL
11e75060a8f9918c929b0050569ea8fb,1942-06-12,NULL,F,NULL,NULL,N,03,N,NULL,F,NULL,NULL,B,B,NULL,2020-06-01 14:15:29,FLM,34220,YvJLcrECS1etrQC7e/Yd 2019-12-13
11e75060ab1a545688ed0050569ea8fb,1952-09-14,00:00,F,NULL,NULL,N,05,N,ENG,Female,NULL,NULL,Not Hispanic or Latino,White,ENGLISH,2020-06-02 19:46:50,LNK,32207-5787,"cfhOcL0HT1aprQI= 2020-02-19
"
11e75060acc6e09ea7850050569ea8fb,1977-05-06,00:00,F,NULL,NULL,N,05,N,ENG,Female,NULL,NULL,Not Hispanic or Latino,White,ENGLISH,2020-06-02 19:47:02,LNK,32246-3767,"cfhOcL0LQl+gogE= 2020-04-30
"
11e75060b17a2f6aa7850050569ea8fb,1952-04-10,00:00,F,NI,NI,N,03,N,ENG,F,NULL,NULL,B,B,ENGLISH,2020-06-01 14:15:29,LNK,322443700,"YvJLcr0CS1Ksowe8f/Ae 2015-06-01
"
:param DATA_DIR:
:param file_name:
:return: id_demo[patid] = (sex, bdate, race, zipcode)
"""
print('read_demo...')
path = os.path.join(DATA_DIR, 'DEMOGRAPHIC.csv')
df_demo = pd.read_csv(path, skiprows=[1]) # new added skiprows=[1], new data 1st -------
id_demo = {}
if patient_list is not None:
patient_list = set(patient_list)
for index, row in tqdm(df_demo.iterrows(), total=len(df_demo)):
patid = row['PATID']
sex = 0 if row['SEX'] == 'F' else 1 # Female 0, Male, and UN 1
bdate = utils.str_to_datetime(row['BIRTH_DATE']) # datetime(*list(map(int, row['BIRTH_DATE'].split('-')))) # year-month-day
# birth_year = bdate.year
try:
race = int(row['RACE'])
except ValueError:
race = 0 # denote all OT, UN, NI as 0
zipcode = row['ZIP_CODE']
if (patient_list is None) or (patid in patient_list):
id_demo[patid] = (bdate, sex, race, zipcode)
return id_demo
def build_patient_dates(DATA_DIR):
"""
# Start date: the first date in the EHR database
# Initiation date: The first date when patients were diagnosed with MCI.
# Index date: the date of the first prescription of the assigned drug (can not determine here. need
# mci_drug_taken_by_patient_from_dispensing_plus_prescribing later)
# last date: last date of drug, or diagnosis? Use drug time in the cohort building and selection code part
Caution:
# First use DX_DATE, then ADMIT_DATE, then discard record
# Only add validity check datetime(1990,1,1)<date<datetime(2030,1,1) for updating 1st and last diagnosis date
# Input
:param DATA_DIR:
:return:
"""
print("******build_patient_dates*******")
df_demo = pd.read_csv(os.path.join(DATA_DIR, 'DEMOGRAPHIC.csv'), skiprows=[1])
# 0-birth date
# 1-start diagnosis date, 2-initial mci diagnosis date,
# 3-first AD diagnosis date, 4-first dementia diagnosis date
# 5-first ADRD diagnosis, 6-last diagnosis
patient_dates = {pid: [utils.str_to_datetime(bdate), np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
for (pid, bdate) in zip(df_demo['PATID'], df_demo['BIRTH_DATE'])}
n_records = 0
n_no_date = 0
min_date = datetime.max
max_date = datetime.min
with open(os.path.join(DATA_DIR, 'DIAGNOSIS.csv'), 'r') as f: # 'MCI_DIANGOSIS.csv'
        col_name = next(csv.reader(f))  # consume the header row so the reader below starts at the first data row
for row in tqdm(csv.reader(f)): # , total=34554738):
n_records += 1
patid, ad_date, dx, dx_type = row[1], row[4], row[6], row[7]
dx_date = row[8]
if patid.startswith('--'):
print('Skip {}th row: {}'.format(n_records, row))
continue
# # datetime(*list(map(int, date.split('-'))))
# First use ADMIT_DATE , then DX_DATE, then discard record
if (ad_date != '') and (ad_date != 'NULL'):
date = utils.str_to_datetime(ad_date)
elif (dx_date != '') and (dx_date != 'NULL'):
date = utils.str_to_datetime(dx_date)
else:
n_no_date += 1
continue
if date > max_date:
max_date = date
if date < min_date:
min_date = date
# 1-start diagnosis date
if pd.isna(patient_dates[patid][1]) or date < patient_dates[patid][1]:
if datetime(1990, 1, 1) < date < datetime(2030, 1, 1):
patient_dates[patid][1] = date
# 2-initial mci diagnosis date
if utils.is_mci(dx):
if pd.isna(patient_dates[patid][2]) or date < patient_dates[patid][2]:
patient_dates[patid][2] = date
# 3-firs AD diagnosis date
if utils.is_AD(dx):
if pd.isna(patient_dates[patid][3]) or date < patient_dates[patid][3]:
patient_dates[patid][3] = date
# 4-first dementia diagnosis date
if utils.is_dementia(dx):
if pd.isna(patient_dates[patid][4]) or date < patient_dates[patid][4]:
patient_dates[patid][4] = date
# 5-first AD or dementia diagnosis date
if utils.is_AD(dx) or utils.is_dementia(dx):
if pd.isna(patient_dates[patid][5]) or date < patient_dates[patid][5]:
patient_dates[patid][5] = date
# 6-last diagnosis
if pd.isna(patient_dates[patid][6]) or date > patient_dates[patid][6]:
if datetime(1990, 1, 1) < date < datetime(2030, 1, 1):
patient_dates[patid][6] = date
print('len(patient_dates)', len(patient_dates))
print('n_records', n_records)
print('n_no_date', n_no_date)
pickle.dump(patient_dates,
open('pickles/patient_dates.pkl', 'wb'))
df = pd.DataFrame(patient_dates).T
df = df.rename(columns={0: 'birth_date',
1: '1st_diagnosis_date',
2: '1st_mci_date',
3: '1st_AD_date',
4: '1st_dementia_date',
5: '1st_ADRD_date',
6: 'last_diagnosis_date'})
idx_ADRD = pd.notna(df['1st_ADRD_date'])
df.loc[idx_ADRD, 'MCI<ADRD'] = df.loc[idx_ADRD, '1st_mci_date'] < df.loc[idx_ADRD, '1st_ADRD_date']
idx_AD = pd.notna(df['1st_AD_date'])
df.loc[idx_AD, 'MCI<AD'] = df.loc[idx_AD, '1st_mci_date'] < df.loc[idx_AD, '1st_AD_date']
idx_RD = pd.notna(df['1st_dementia_date'])
    df.loc[idx_RD, 'MCI<Dementia'] = df.loc[idx_RD, '1st_mci_date'] < df.loc[idx_RD, '1st_dementia_date']
df.to_csv('debug/patient_dates.csv')
print('dump done')
return patient_dates
def plot_MCI_to_ADRD():
import matplotlib.pyplot as plt
import pandas as pd
pdates = pd.read_csv(os.path.join('debug', 'patient_dates.csv'))
idx = | pd.notna(pdates['1st_ADRD_date']) | pandas.notna |
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
        Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
import pandas as pd
import numpy as np
# load dataset
def wrangle_data_fun():
path_to_data = "model/airline-safety.csv"
data = | pd.read_csv(path_to_data) | pandas.read_csv |
# fbs_allocation.py (flowsa)
# !/usr/bin/env python3
# coding=utf-8
"""
Functions to allocate data using additional data sources
"""
import numpy as np
import pandas as pd
import flowsa
from flowsa.common import fba_activity_fields, fbs_activity_fields, \
fba_mapped_wsec_default_grouping_fields, fba_wsec_default_grouping_fields, \
check_activities_sector_like, return_bea_codes_used_as_naics
from flowsa.location import US_FIPS
from flowsa.schema import activity_fields
from flowsa.settings import log
from flowsa.validation import check_allocation_ratios, \
check_if_location_systems_match
from flowsa.flowbyfunctions import collapse_activity_fields, \
sector_aggregation, sector_disaggregation, subset_df_by_geoscale, \
load_fba_w_standardized_units, aggregator
from flowsa.allocation import allocate_by_sector, proportional_allocation_by_location_and_activity, \
equally_allocate_parent_to_child_naics, equal_allocation
from flowsa.sectormapping import get_fba_allocation_subset, add_sectors_to_flowbyactivity
from flowsa.dataclean import replace_strings_with_NoneType
from flowsa.validation import check_if_data_exists_at_geoscale
def direct_allocation_method(fbs, k, names, method):
"""
Directly assign activities to sectors
:param fbs: df, FBA with flows converted using fedelemflowlist
:param k: str, source name
:param names: list, activity names in activity set
:param method: dictionary, FBS method yaml
:return: df with sector columns
"""
log.info('Directly assigning activities to sectors')
# for each activity, if activities are not sector like,
# check that there is no data loss
if check_activities_sector_like(fbs) is False:
activity_list = []
n_allocated = []
for n in names:
# avoid double counting by dropping n from the df after calling on
# n, in the event both ACB and APB values exist
fbs = fbs[~((fbs[fba_activity_fields[0]].isin(n_allocated)) |
(fbs[fba_activity_fields[1]].isin(n_allocated))
)].reset_index(drop=True)
log.debug('Checking for %s at target sector level', n)
fbs_subset = \
fbs[(fbs[fba_activity_fields[0]] == n) |
(fbs[fba_activity_fields[1]] == n)].reset_index(drop=True)
activity_list.append(fbs_subset)
n_allocated.append(n)
fbs = | pd.concat(activity_list, ignore_index=True) | pandas.concat |
import ast
import os
from datetime import timedelta
import pandas as pd
import requests
from dateutil.parser import parse
from tqdm import tqdm
from src import BASEDATE, DATADIR
def load_stations_metadata() -> pd.DataFrame:
"""Load the stations metadata."""
stations_api = "https://rata.digitraffic.fi/api/v1/metadata/stations"
df = pd.read_json(requests.get(stations_api).text)
return df
def load_trains_last_30_days() -> pd.DataFrame:
"""Load the trains dataset for the last 30 days."""
last_30_days = [
(BASEDATE - timedelta(days=i)).strftime("%Y-%m-%d") for i in range(30)
]
# Call the endpoint for each of the departure dates we want
data = []
for dep_date in tqdm(last_30_days):
day_api = f"https://rata.digitraffic.fi/api/v1/trains/{dep_date}"
data.append(pd.read_json(requests.get(day_api).text))
# Concatenate data from last 30 days
df = pd.concat(data).reset_index(drop=True)
return df
def prep_trains_last_30_days(df) -> pd.DataFrame:
"""Prepare the trains dataset for the last 30 days."""
def _compute_duration(rows):
if (
rows[0].get("actualTime") is not None
and rows[-1].get("actualTime") is not None
):
return parse(rows[-1]["actualTime"]) - parse(rows[0]["actualTime"])
else:
return parse(rows[-1]["scheduledTime"]) - parse(rows[0]["scheduledTime"])
def _extract_route_embedding(rows):
non_consecutive_rows = [
j["stationShortCode"]
for i, j in enumerate(rows)
if j["stationShortCode"] != rows[i - 1]["stationShortCode"] or i == 0
]
return "-".join(non_consecutive_rows)
# Feature engineering
df["numberStopedStations"] = (
df["timeTableRows"]
.apply(
lambda x: (len(list(filter(lambda y: y["trainStopping"], x))) - 2) / 2 + 2
)
.astype(int)
)
df["trainDuration"] = df["timeTableRows"].apply(_compute_duration)
df["routeEmbedding"] = df["timeTableRows"].apply(_extract_route_embedding)
return df
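# Illustrative sketch (added for clarity, not part of the original module): what the
# route-embedding logic above produces for a tiny, made-up timetable. Consecutive
# duplicate station codes collapse, so the result here is "HKI-PSL-TPE".
def _demo_route_embedding():
    rows = [
        {"stationShortCode": "HKI"},
        {"stationShortCode": "PSL"},
        {"stationShortCode": "PSL"},
        {"stationShortCode": "TPE"},
    ]
    non_consecutive = [
        r["stationShortCode"] for i, r in enumerate(rows)
        if r["stationShortCode"] != rows[i - 1]["stationShortCode"] or i == 0
    ]
    return "-".join(non_consecutive)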
def load_cleaned_trains() -> pd.DataFrame:
"""Load the cleaned trains dataset."""
datafile = os.path.join(DATADIR, "trains_cleaned.csv")
# Load datafile form disk if it exists else create it
if os.path.isfile(datafile):
# Specify how to load each column
converter = {"timeTableRows": lambda x: ast.literal_eval(x)}
df = pd.read_csv(
datafile,
converters=converter,
parse_dates=["timetableAcceptanceDate"],
)
df["trainDuration"] = | pd.to_timedelta(df["trainDuration"]) | pandas.to_timedelta |
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import pandas as pd
from pandas.core.base import PandasObject
from pandas import compat
from pandas.compat import range
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCSparseSeries)
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from pandas.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
astype_nansafe, find_common_type)
from pandas.core.dtypes.missing import isnull, notnull, na_value_for_dtype
import pandas._libs.sparse as splib
from pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex
from pandas._libs import index as libindex
import pandas.core.algorithms as algos
import pandas.core.ops as ops
import pandas.io.formats.printing as printing
from pandas.util._decorators import Appender
from pandas.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, ABCSparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _get_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, series=False):
if series and is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if name in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
elif name in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
dtype = find_common_type([left.dtype, right.dtype])
left = left.astype(dtype)
right = right.astype(dtype)
else:
dtype = left.dtype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name, dtype=dtype)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(left_sp_values, left.sp_index,
left.fill_value, right_sp_values,
right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
""" wrap op result to have correct dtype """
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype)
class SparseArray(PandasObject, np.ndarray):
"""Data structure for labeled, sparse floating point 1-D data
Parameters
----------
data : {array-like (1-D), Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
fill_value=None, dtype=None, copy=False):
if index is not None:
if data is None:
data = np.nan
if not is_scalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(len(index), dtype='float64')
values.fill(data)
data = values
if isinstance(data, ABCSparseSeries):
data = data.values
is_sparse_array = isinstance(data, SparseArray)
if dtype is not None:
dtype = np.dtype(dtype)
if is_sparse_array:
sparse_index = data.sp_index
values = data.sp_values
fill_value = data.fill_value
else:
# array-like
if sparse_index is None:
if dtype is not None:
data = np.asarray(data, dtype=dtype)
res = make_sparse(data, kind=kind, fill_value=fill_value)
values, sparse_index, fill_value = res
else:
values = _sanitize_values(data)
if len(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same length as the"
" index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
subarr = np.array(values, dtype=dtype, copy=True)
else:
subarr = np.asarray(values, dtype=dtype)
# Change the class of the array to be the subclass type.
return cls._simple_new(subarr, sparse_index, fill_value)
@classmethod
def _simple_new(cls, data, sp_index, fill_value):
if not isinstance(sp_index, SparseIndex):
# caller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
if fill_value is None:
if sp_index.ngaps > 0:
# has missing hole
fill_value = np.nan
else:
fill_value = | na_value_for_dtype(data.dtype) | pandas.core.dtypes.missing.na_value_for_dtype |
import sys
import argparse
import torch
import csv
import string
import pandas as pd
from torchtext.data.functional import generate_sp_model
import params
from rcnn import RCNN
from train import *
from dataset import *
##data path
train_df_path = params.train_df
test_df_path = params.test_df
val_df_path = params.val_df
def train_sentencepiece(df_path):
"""
    Train a SentencePiece model on the training and validation data.
    df_path (str): path to the csv
"""
#clean the dataset
def clean(text):
"""
params:
text (str)
returns:
clean text
"""
text = text.lower()
letters = string.ascii_lowercase
not_letters = set([char_ for char_ in text if char_ not in letters and char_ != ' '])
for char in not_letters:
text = text.replace(char, " ")
return text
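    # Cleaning sketch: lowercase, then replace every non-letter with a space,
    # e.g. clean("Hello, World!") -> "hello  world "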
df = | pd.read_csv(df_path) | pandas.read_csv |
import argparse
import pandas as pd
from collections import Counter
import numpy as np
import datetime
def run():
parser = argparse.ArgumentParser()
# input files
parser.add_argument('--metadata')
parser.add_argument('--delim', default='\t')
parser.add_argument('--dateCol', default=1, type=int)
parser.add_argument('--groupCol', default=0, type=int)
parser.add_argument('--timeGroup', default='week')
parser.add_argument('--nPerGroup', type=int, default=1)
parser.add_argument('--nReps', type=int, default=1)
args = parser.parse_args()
#args.metadata = 'data/19B_subclade/19B_subclade_beast_include.tsv'
#args.nPerGroup = 2
#args.groupCol = 7
#args.dateCol = 2
dat = pd.read_csv(args.metadata, sep=args.delim, header=None)
print(dat)
dat[args.dateCol] = pd.to_datetime(dat[args.dateCol])
if args.timeGroup.lower() == 'day':
dat['end_date'] = pd.to_datetime(dat[args.dateCol])
elif args.timeGroup.lower() == 'week':
dat['end_date'] = \
pd.to_datetime(dat[args.dateCol]).apply(lambda k:
k+datetime.timedelta(days= 6 - k.weekday()))
elif args.timeGroup.lower() == 'month':
dat['end_date'] = \
| pd.to_datetime(dat[args.dateCol]) | pandas.to_datetime |
import numpy as np
import pandas as pd
from glob import glob
from scipy import stats
from scipy.stats import pearsonr
from sklearn.preprocessing import OneHotEncoder
def compute_corrs(X, Y):
""" Compute Pearson correlation between each column in X
and each column in Y. """
corrs = np.zeros((X.shape[1], Y.shape[1])) # 33 x 7
pvals = np.zeros_like(corrs)
for au_i in range(X.shape[1]):
for emo_i in range(Y.shape[1]):
corrs[au_i, emo_i], pvals[au_i, emo_i] = pearsonr(X[:, au_i], Y[:, emo_i])
corrs[np.isnan(corrs)] = 0
return corrs, pvals
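# Usage sketch (hypothetical shapes): correlate 33 AU columns with 7 emotion
# columns across 100 stimuli.
#   corrs, pvals = compute_corrs(np.random.rand(100, 33), np.random.rand(100, 7))
#   corrs.shape  # (33, 7)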
# Load in data and split in AUs and emo ratings
#files = sorted(glob('data/ratings/sub*.tsv'))
# One-hot encode emotions
ohe = OneHotEncoder(sparse=False)
emotions = ['anger', 'disgust', 'fear', 'happy', 'sadness', 'surprise', 'other']
ohe.fit(np.array(emotions)[:, None])
# Remove "other" (but not from data)
idx = np.ones(7, dtype=bool)
cats = ohe.categories_[0]
idx[cats == 'other'] = False
cats = cats[idx]
files = sorted(glob('data/ratings/*/*.tsv'))
mega_df = pd.concat([ | pd.read_csv(f, sep='\t', index_col=0) | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
import argparse
def save(d1, fname):
d1.to_csv(fname, index=False)
def colC(d1):
if "C" in d1.columns : return
names = d1["Model Name"].values
nv = []
for s in names:
if "Lasso" in s:
v = float(s.split("Lasso")[-1]) if len(s.split("Lasso")) > 1 else 0
if v != 0 : v = 1/v
else:
v = float(s.split("MultiNB")[-1]) if len(s.split("MultiNB")) > 1 else 0
nv.append(v)
d1["C"] = nv
#
# for the different regularization parameters, get
# the mean F1 values and regularization strength
#
def ParameterImpact(d1):
# For all of the runs
# Group By Model Name - ModelName
# Logistic Lasso + <Regularization Parameter>
# For Logistic Lasso the Inverse Regularization Parameter is 'C' in sklearn
# Naive Bayes model names are MultiNB+<smoothing parameter>
# For Naive Bayes the Laplace Smoothing Parameter is alpha in sklearn
g=d1.groupby(['Model Name']).agg({
'Model Name': [lambda x : ' '.join(x)],
'C' :['mean'],
'F1':['mean','min','max']})
nameV = g['Model Name']['<lambda>'].values
f1MeanV = g['F1']['mean'].values
f1MinV = g['F1']['min'].values
f1MaxV = g['F1']['max'].values
CV = g['C']['mean'].values
    nameV = [s.split(" ")[-1] for s in nameV]  # collapse the space-joined duplicates to one model name
combo=sorted([(name, ci, f1mean, f1min, f1max, ci) for name, ci, f1mean, f1min, f1max in zip(nameV, CV, f1MeanV, f1MinV, f1MaxV)],
key=lambda x : x[1], reverse=False)
N = len(combo)
model = [None]* N
param, f1Mean, f1Min, f1Max = np.zeros(N), np.zeros(N), np.zeros(N), np.zeros(N) #
for i, c in enumerate(combo):
        model[i] = c[0]
param[i] = c[1]
f1Mean[i] = c[2]
f1Min[i] = c[3]
f1Max[i] = c[4]
return model, param, f1Mean, f1Min, f1Max
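# Usage sketch (assumes a results frame with 'Model Name', 'Stack', 'F1' columns,
# as loaded in PlotL1 below, after colC() has added the 'C' column):
#   model, param, f1_mean, f1_min, f1_max = ParameterImpact(d1[d1["Stack"] == False])
# Outputs are sorted by increasing regularization parameter.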
def showOrSave(output=None):
if output is not None:
#plt.savefig("{}.pdf".format(output), bbox_inches='tight')
#plt.savefig("{}.jpg".format(output), bbox_inches='tight', rasterized=True, dpi=80)
plt.savefig("{}.pdf".format(output), bbox_inches='tight', rasterized=True, dpi=50)
else:
plt.show()
#
# Plot impact of regularization parameter on predicted F1 Score
#
def PlotL1(f1, output=None, showMinMax=False):
d1 = pd.read_csv(f1)
colC(d1)
dns = d1[d1["Stack"] == False]
modelns, xns, yns, yns_min, yns_max = ParameterImpact(dns)
ds = d1[d1["Stack"] == True]
models, xs, ys, ys_min, ys_max = ParameterImpact(ds)
plt.title('Logistic Lasso Regularization')
    plt.xlabel('L1 Regularization (larger = more regularization)')
plt.ylabel('F1 Score')
plt.plot(xns[1:],yns[1:], marker='o', label='unstacked-mean F1 score')
plt.plot(xs[1:],ys[1:], marker='o', label='stacked-mean F1 score')
if showMinMax :
plt.plot(xns[1:],yns_max[1:], marker='o', label='unstacked-max F1')
plt.plot(xns[1:],yns_min[1:], marker='o', label='unstacked-min F1')
plt.plot(xs[1:],ys_min[1:], marker='o', label='stacked-min F1')
plt.plot(xs[1:],ys_max[1:], marker='o', label='stacked-max F1')
plt.legend()
showOrSave(output)
#
# Plot the Number of Elements in the Matricies for different NGrams with
# Stacking and UnStacking Documents
#
def matrixSize(f1, output=None, f2=None):
d1 = | pd.read_csv(f1) | pandas.read_csv |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import pandas as pd
# File called _pytest for PyCharm compatibility
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from tests.common import TestData, assert_almost_equal
class TestDataFrameMetrics(TestData):
funcs = ["max", "min", "mean", "sum"]
extended_funcs = ["median", "mad", "var", "std"]
filter_data = [
"AvgTicketPrice",
"Cancelled",
"dayOfWeek",
"timestamp",
"DestCountry",
]
@pytest.mark.parametrize("numeric_only", [False, None])
def test_flights_metrics(self, numeric_only):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
for func in self.funcs:
# Pandas v1.0 doesn't support mean() on datetime
# Pandas and Eland don't support sum() on datetime
if not numeric_only:
dtype_include = (
[np.number, np.datetime64]
if func not in ("mean", "sum")
else [np.number]
)
pd_flights = pd_flights.select_dtypes(include=dtype_include)
ed_flights = ed_flights.select_dtypes(include=dtype_include)
pd_metric = getattr(pd_flights, func)(numeric_only=numeric_only)
ed_metric = getattr(ed_flights, func)(numeric_only=numeric_only)
assert_series_equal(pd_metric, ed_metric, check_dtype=False)
def test_flights_extended_metrics(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on reduced set of data for more consistent
# median behaviour + better var, std test for sample vs population
pd_flights = pd_flights[["AvgTicketPrice"]]
ed_flights = ed_flights[["AvgTicketPrice"]]
import logging
logger = logging.getLogger("elasticsearch")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
for func in self.extended_funcs:
pd_metric = getattr(pd_flights, func)(
**({"numeric_only": True} if func != "mad" else {})
)
ed_metric = getattr(ed_flights, func)(numeric_only=True)
pd_value = pd_metric["AvgTicketPrice"]
ed_value = ed_metric["AvgTicketPrice"]
assert (ed_value * 0.9) <= pd_value <= (ed_value * 1.1) # +/-10%
def test_flights_extended_metrics_nan(self):
pd_flights = self.pd_flights()
ed_flights = self.ed_flights()
# Test on single row to test NaN behaviour of sample std/variance
pd_flights_1 = pd_flights[pd_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
ed_flights_1 = ed_flights[ed_flights.FlightNum == "9HY9SWR"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_1, func)()
ed_metric = getattr(ed_flights_1, func)(numeric_only=False)
assert_series_equal(pd_metric, ed_metric, check_exact=False)
# Test on zero rows to test NaN behaviour of sample std/variance
pd_flights_0 = pd_flights[pd_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
ed_flights_0 = ed_flights[ed_flights.FlightNum == "XXX"][["AvgTicketPrice"]]
for func in self.extended_funcs:
pd_metric = getattr(pd_flights_0, func)()
ed_metric = getattr(ed_flights_0, func)(numeric_only=False)
assert_series_equal(pd_metric, ed_metric, check_exact=False)
def test_ecommerce_selected_non_numeric_source_fields(self):
# None of these are numeric
columns = [
"category",
"currency",
"customer_birth_date",
"customer_first_name",
"user",
]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_ecommerce_selected_mixed_numeric_source_fields(self):
# Some of these are numeric
columns = [
"category",
"currency",
"taxless_total_price",
"customer_birth_date",
"total_quantity",
"customer_first_name",
"user",
]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_ecommerce_selected_all_numeric_source_fields(self):
# All of these are numeric
columns = ["total_quantity", "taxful_total_price", "taxless_total_price"]
pd_ecommerce = self.pd_ecommerce()[columns]
ed_ecommerce = self.ed_ecommerce()[columns]
for func in self.funcs:
assert_series_equal(
getattr(pd_ecommerce, func)(numeric_only=True),
getattr(ed_ecommerce, func)(numeric_only=True),
check_exact=False,
)
def test_flights_datetime_metrics_agg(self):
ed_timestamps = self.ed_flights()[["timestamp"]]
expected_values = {
"max": pd.Timestamp("2018-02-11 23:50:12"),
"min": pd.Timestamp("2018-01-01 00:00:00"),
"mean": pd.Timestamp("2018-01-21 19:20:45.564438232"),
"sum": pd.NaT,
"mad": pd.NaT,
"var": pd.NaT,
"std": pd.NaT,
"nunique": 12236,
}
ed_metrics = ed_timestamps.agg(
self.funcs + self.extended_funcs + ["nunique"], numeric_only=False
)
ed_metrics_dict = ed_metrics["timestamp"].to_dict()
ed_metrics_dict.pop("median") # Median is tested below.
for key, expected_value in expected_values.items():
assert_almost_equal(ed_metrics_dict[key], expected_value)
@pytest.mark.parametrize("agg", ["mean", "min", "max", "nunique"])
def test_flights_datetime_metrics_single_agg(self, agg):
ed_timestamps = self.ed_flights()[["timestamp"]]
expected_values = {
"min": pd.Timestamp("2018-01-01 00:00:00"),
"mean": pd.Timestamp("2018-01-21 19:20:45.564438232"),
"max": pd.Timestamp("2018-02-11 23:50:12"),
"nunique": 12236,
}
ed_metric = ed_timestamps.agg([agg])
if agg == "nunique":
# df with timestamp column should return int64
assert ed_metric.dtypes["timestamp"] == np.int64
else:
# df with timestamp column should return datetime64[ns]
assert ed_metric.dtypes["timestamp"] == np.dtype("datetime64[ns]")
assert_almost_equal(ed_metric["timestamp"][0], expected_values[agg])
@pytest.mark.parametrize("agg", ["mean", "min", "max"])
def test_flights_datetime_metrics_agg_func(self, agg):
ed_timestamps = self.ed_flights()[["timestamp"]]
expected_values = {
"min": pd.Timestamp("2018-01-01 00:00:00"),
"mean": pd.Timestamp("2018-01-21 19:20:45.564438232"),
"max": pd.Timestamp("2018-02-11 23:50:12"),
}
ed_metric = getattr(ed_timestamps, agg)(numeric_only=False)
assert ed_metric.dtype == np.dtype("datetime64[ns]")
assert_almost_equal(ed_metric[0], expected_values[agg])
@pytest.mark.parametrize("agg", ["median", "quantile"])
def test_flights_datetime_metrics_median_quantile(self, agg):
ed_df = self.ed_flights_small()[["timestamp"]]
median = ed_df.median(numeric_only=False)[0]
assert isinstance(median, pd.Timestamp)
assert (
pd.to_datetime("2018-01-01 10:00:00.000")
<= median
<= pd.to_datetime("2018-01-01 12:00:00.000")
)
agg_value = ed_df.agg([agg])["timestamp"][0]
assert isinstance(agg_value, pd.Timestamp)
assert (
pd.to_datetime("2018-01-01 10:00:00.000")
<= agg_value
<= pd.to_datetime("2018-01-01 12:00:00.000")
)
def test_metric_agg_keep_dtypes(self):
# max, min and median maintain their dtypes
df = self.ed_flights_small()[["AvgTicketPrice", "Cancelled", "dayOfWeek"]]
assert df.min().tolist() == [131.81910705566406, False, 0]
assert df.max().tolist() == [989.9527587890625, True, 0]
assert df.median().tolist() == [550.276123046875, False, 0]
all_agg = df.agg(["min", "max", "median"])
assert all_agg.dtypes.tolist() == [
np.dtype("float64"),
np.dtype("bool"),
np.dtype("int64"),
]
assert all_agg.to_dict() == {
"AvgTicketPrice": {
"max": 989.9527587890625,
"median": 550.276123046875,
"min": 131.81910705566406,
},
"Cancelled": {"max": True, "median": False, "min": False},
"dayOfWeek": {"max": 0, "median": 0, "min": 0},
}
# sum should always be the same dtype as the input, except for bool where the sum of bools should be an int64.
sum_agg = df.agg(["sum"])
assert sum_agg.dtypes.to_list() == [
np.dtype("float64"),
np.dtype("int64"),
np.dtype("int64"),
]
assert sum_agg.to_dict() == {
"AvgTicketPrice": {"sum": 26521.624084472656},
"Cancelled": {"sum": 6},
"dayOfWeek": {"sum": 0},
}
def test_flights_numeric_only(self):
# All Aggregations Data Check
ed_flights = self.ed_flights().filter(self.filter_data)
pd_flights = self.pd_flights().filter(self.filter_data)
# agg => numeric_only True returns float64 values
# We compare it with individual single agg functions of pandas with numeric_only=True
filtered_aggs = self.funcs + self.extended_funcs
agg_data = ed_flights.agg(filtered_aggs, numeric_only=True).transpose()
for agg in filtered_aggs:
# Explicitly check for mad because it returns nan for bools
if agg == "mad":
assert np.isnan(agg_data[agg]["Cancelled"])
else:
assert_series_equal(
agg_data[agg].rename(None),
getattr(pd_flights, agg)(numeric_only=True).astype(float),
check_exact=False,
rtol=True,
)
# all single aggs return float64 for numeric_only=True
def test_numeric_only_true_single_aggs(self):
ed_flights = self.ed_flights().filter(self.filter_data)
for agg in self.funcs + self.extended_funcs:
result = getattr(ed_flights, agg)(numeric_only=True)
assert result.dtype == np.dtype("float64")
assert result.shape == ((3,) if agg != "mad" else (2,))
# check dtypes and shape of min, max and median for numeric_only=False | None
@pytest.mark.parametrize("agg", ["min", "max", "median"])
@pytest.mark.parametrize("numeric_only", [False, None])
def test_min_max_median_numeric_only(self, agg, numeric_only):
ed_flights = self.ed_flights().filter(self.filter_data)
if numeric_only is False:
calculated_values = getattr(ed_flights, agg)(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], np.float64)
assert isinstance(calculated_values["Cancelled"], np.bool_)
assert isinstance(calculated_values["dayOfWeek"], np.int64)
assert isinstance(calculated_values["timestamp"], pd.Timestamp)
assert np.isnan(calculated_values["DestCountry"])
assert calculated_values.shape == (5,)
elif numeric_only is None:
calculated_values = getattr(ed_flights, agg)(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], np.float64)
assert isinstance(calculated_values["Cancelled"], np.bool_)
assert isinstance(calculated_values["dayOfWeek"], np.int64)
assert isinstance(calculated_values["timestamp"], pd.Timestamp)
assert calculated_values.shape == (4,)
# check dtypes and shape for sum
@pytest.mark.parametrize("numeric_only", [False, None])
def test_sum_numeric_only(self, numeric_only):
ed_flights = self.ed_flights().filter(self.filter_data)
if numeric_only is False:
calculated_values = ed_flights.sum(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], np.float64)
assert isinstance(calculated_values["dayOfWeek"], np.int64)
assert isinstance(calculated_values["Cancelled"], np.int64)
assert pd.isnull(calculated_values["timestamp"])
assert np.isnan(calculated_values["DestCountry"])
assert calculated_values.shape == (5,)
elif numeric_only is None:
calculated_values = ed_flights.sum(numeric_only=numeric_only)
dtype_list = [calculated_values[i].dtype for i in calculated_values.index]
assert dtype_list == [
np.dtype("float64"),
np.dtype("int64"),
np.dtype("int64"),
]
assert calculated_values.shape == (3,)
# check dtypes and shape for std
@pytest.mark.parametrize("numeric_only", [False, None])
def test_std_numeric_only(self, numeric_only):
ed_flights = self.ed_flights().filter(self.filter_data)
if numeric_only is False:
calculated_values = ed_flights.std(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], float)
assert isinstance(calculated_values["Cancelled"], float)
assert isinstance(calculated_values["dayOfWeek"], float)
assert pd.isnull(calculated_values["timestamp"])
assert np.isnan(calculated_values["DestCountry"])
assert calculated_values.shape == (5,)
elif numeric_only is None:
calculated_values = ed_flights.std(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], float)
assert isinstance(calculated_values["Cancelled"], float)
assert isinstance(calculated_values["dayOfWeek"], float)
assert calculated_values.shape == (3,)
# check dtypes and shape for var
@pytest.mark.parametrize("numeric_only", [False, None])
def test_var_numeric_only(self, numeric_only):
ed_flights = self.ed_flights().filter(self.filter_data)
if numeric_only is False:
calculated_values = ed_flights.var(numeric_only=numeric_only)
assert isinstance(calculated_values["AvgTicketPrice"], np.float64)
assert isinstance(calculated_values["dayOfWeek"], np.float64)
assert isinstance(calculated_values["Cancelled"], np.float64)
assert | pd.isnull(calculated_values["timestamp"]) | pandas.isnull |
import pandas as pd
import os.path as osp
import inspect
from torch_geometric.data import Data
from sklearn import preprocessing
import torch
import random
import numpy as np
import pdb
from utils.utils import get_known_mask, mask_edge
def create_node(df, mode):
if mode == 0: # onehot feature node, all 1 sample node
nrow, ncol = df.shape
feature_ind = np.array(range(ncol))
feature_node = np.zeros((ncol,ncol))
feature_node[np.arange(ncol), feature_ind] = 1
sample_node = [[1]*ncol for i in range(nrow)]
node = sample_node + feature_node.tolist()
elif mode == 1: # onehot sample and feature node
nrow, ncol = df.shape
feature_ind = np.array(range(ncol))
feature_node = np.zeros((ncol,ncol+1))
feature_node[np.arange(ncol), feature_ind+1] = 1
sample_node = np.zeros((nrow,ncol+1))
sample_node[:,0] = 1
node = sample_node.tolist() + feature_node.tolist()
return node
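# Sketch for a 2x2 dataframe with mode 0: two all-ones sample nodes followed by
# one-hot feature nodes, i.e. [[1, 1], [1, 1], [1, 0], [0, 1]].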
def create_edge(df):
n_row, n_col = df.shape
edge_start = []
edge_end = []
for x in range(n_row):
edge_start = edge_start + [x] * n_col # obj
edge_end = edge_end + list(n_row+np.arange(n_col)) # att
edge_start_new = edge_start + edge_end
edge_end_new = edge_end + edge_start
return (edge_start_new, edge_end_new)
def create_edge_attr(df):
nrow, ncol = df.shape
edge_attr = []
for i in range(nrow):
for j in range(ncol):
edge_attr.append([float(df.iloc[i,j])])
edge_attr = edge_attr + edge_attr
return edge_attr
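# Sketch of the bipartite edges for a 2x2 dataframe (samples 0-1, features 2-3);
# create_edge returns both directions:
#   edge_start = [0, 0, 1, 1, 2, 3, 2, 3]
#   edge_end   = [2, 3, 2, 3, 0, 0, 1, 1]
# create_edge_attr stores each observed value once per direction, so its
# length is 2 * nrow * ncol.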
def get_data(df_X, df_y, node_mode, train_edge_prob, split_sample_ratio, split_by, train_y_prob, seed=0, normalize=True):
if len(df_y.shape)==1:
df_y = df_y.to_numpy()
elif len(df_y.shape)==2:
df_y = df_y[0].to_numpy()
if normalize:
x = df_X.values
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df_X = pd.DataFrame(x_scaled)
edge_start, edge_end = create_edge(df_X)
edge_index = torch.tensor([edge_start, edge_end], dtype=int)
edge_attr = torch.tensor(create_edge_attr(df_X), dtype=torch.float)
node_init = create_node(df_X, node_mode)
x = torch.tensor(node_init, dtype=torch.float)
y = torch.tensor(df_y, dtype=torch.float)
    # set seed to fix known/unknown edges
torch.manual_seed(seed)
#keep train_edge_prob of all edges
train_edge_mask = get_known_mask(train_edge_prob, int(edge_attr.shape[0]/2))
double_train_edge_mask = torch.cat((train_edge_mask, train_edge_mask), dim=0)
#mask edges based on the generated train_edge_mask
    # train_edge_index is known, test_edge_index is unknown, i.e. missing
train_edge_index, train_edge_attr = mask_edge(edge_index, edge_attr,
double_train_edge_mask, True)
train_labels = train_edge_attr[:int(train_edge_attr.shape[0]/2),0]
test_edge_index, test_edge_attr = mask_edge(edge_index, edge_attr,
~double_train_edge_mask, True)
test_labels = test_edge_attr[:int(test_edge_attr.shape[0]/2),0]
#mask the y-values during training, i.e. how we split the training and test sets
train_y_mask = get_known_mask(train_y_prob, y.shape[0])
test_y_mask = ~train_y_mask
data = Data(x=x, y=y, edge_index=edge_index, edge_attr=edge_attr,
train_y_mask=train_y_mask, test_y_mask=test_y_mask,
train_edge_index=train_edge_index,train_edge_attr=train_edge_attr,
train_edge_mask=train_edge_mask,train_labels=train_labels,
test_edge_index=test_edge_index,test_edge_attr=test_edge_attr,
test_edge_mask=~train_edge_mask,test_labels=test_labels,
df_X=df_X,df_y=df_y,
edge_attr_dim=train_edge_attr.shape[-1],
user_num=df_X.shape[0]
)
if split_sample_ratio > 0.:
if split_by == 'y':
sorted_y, sorted_y_index = torch.sort(torch.reshape(y,(-1,)))
elif split_by == 'random':
sorted_y_index = torch.randperm(y.shape[0])
lower_y_index = sorted_y_index[:int(np.floor(y.shape[0]*split_sample_ratio))]
higher_y_index = sorted_y_index[int(np.floor(y.shape[0]*split_sample_ratio)):]
# here we don't split x, only split edge
# train
half_train_edge_index = train_edge_index[:,:int(train_edge_index.shape[1]/2)];
lower_train_edge_mask = []
for node_index in half_train_edge_index[0]:
if node_index in lower_y_index:
lower_train_edge_mask.append(True)
else:
lower_train_edge_mask.append(False)
lower_train_edge_mask = torch.tensor(lower_train_edge_mask)
double_lower_train_edge_mask = torch.cat((lower_train_edge_mask, lower_train_edge_mask), dim=0)
lower_train_edge_index, lower_train_edge_attr = mask_edge(train_edge_index, train_edge_attr,
double_lower_train_edge_mask, True)
lower_train_labels = lower_train_edge_attr[:int(lower_train_edge_attr.shape[0]/2),0]
higher_train_edge_index, higher_train_edge_attr = mask_edge(train_edge_index, train_edge_attr,
~double_lower_train_edge_mask, True)
higher_train_labels = higher_train_edge_attr[:int(higher_train_edge_attr.shape[0]/2),0]
# test
half_test_edge_index = test_edge_index[:,:int(test_edge_index.shape[1]/2)];
lower_test_edge_mask = []
for node_index in half_test_edge_index[0]:
if node_index in lower_y_index:
lower_test_edge_mask.append(True)
else:
lower_test_edge_mask.append(False)
lower_test_edge_mask = torch.tensor(lower_test_edge_mask)
double_lower_test_edge_mask = torch.cat((lower_test_edge_mask, lower_test_edge_mask), dim=0)
lower_test_edge_index, lower_test_edge_attr = mask_edge(test_edge_index, test_edge_attr,
double_lower_test_edge_mask, True)
lower_test_labels = lower_test_edge_attr[:int(lower_test_edge_attr.shape[0]/2),0]
higher_test_edge_index, higher_test_edge_attr = mask_edge(test_edge_index, test_edge_attr,
~double_lower_test_edge_mask, True)
higher_test_labels = higher_test_edge_attr[:int(higher_test_edge_attr.shape[0]/2),0]
data.lower_y_index = lower_y_index
data.higher_y_index = higher_y_index
data.lower_train_edge_index = lower_train_edge_index
data.lower_train_edge_attr = lower_train_edge_attr
data.lower_train_labels = lower_train_labels
data.higher_train_edge_index = higher_train_edge_index
data.higher_train_edge_attr = higher_train_edge_attr
data.higher_train_labels = higher_train_labels
data.lower_test_edge_index = lower_test_edge_index
data.lower_test_edge_attr = lower_test_edge_attr
        data.lower_test_labels = lower_test_labels
data.higher_test_edge_index = higher_test_edge_index
data.higher_test_edge_attr = higher_test_edge_attr
data.higher_test_labels = higher_test_labels
return data
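# Typical call (sketch; argument values are hypothetical):
#   data = get_data(df_X, df_y, node_mode=0, train_edge_prob=0.7,
#                   split_sample_ratio=0., split_by='y', train_y_prob=0.7, seed=0)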
def load_data(args):
uci_path = osp.dirname(osp.abspath(inspect.getfile(inspect.currentframe())))
df_np = np.loadtxt(uci_path+'/raw_data/{}/data/data.txt'.format(args.data))
df_y = pd.DataFrame(df_np[:, -1:])
df_X = | pd.DataFrame(df_np[:, :-1]) | pandas.DataFrame |
"""
Some utility functions the different instrumentation backends
"""
import itertools
from functools import partial
import numpy
from pandas import DataFrame
from ..inspections.inspection_input import InspectionInputRow
def build_annotation_df_from_iters(inspections, annotation_iterators):
"""
Build the annotations dataframe
"""
annotation_iterators = itertools.zip_longest(*annotation_iterators)
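    # zip_longest transposes the per-inspection iterators into rows, padding the
    # shorter ones with None, e.g. (sketch):
    #   list(itertools.zip_longest([1, 2, 3], ['x', 'y'])) == [(1, 'x'), (2, 'y'), (3, None)]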
inspection_names = [str(inspection) for inspection in inspections]
annotations_df = | DataFrame(annotation_iterators, columns=inspection_names) | pandas.DataFrame |
import numpy as np
import pandas as pd
import scanpy as sc
from termcolor import colored
import time
import matplotlib
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import euclidean_distances
import umap
import phate
import seaborn as sns
from pyVIA.core import *
def cellrank_Human(ncomps=80, knn=30, v0_random_seed=7):
import scvelo as scv
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC', 'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
ad = scv.read_loom('/home/shobi/Downloads/Human Hematopoietic Profiling homo_sapiens 2019-11-08 16.12.loom')
print(ad)
# ad = sc.read('/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# ad.obs['nover_label'] = nover_labels
print('start cellrank pipeline', time.ctime())
# scv.utils.show_proportions(ad)
scv.pl.proportions(ad)
scv.pp.filter_and_normalize(ad, min_shared_counts=20, n_top_genes=2000)
sc.tl.pca(ad, n_comps=ncomps)
n_pcs = ncomps
print('npcs', n_pcs, 'knn', knn)
sc.pp.neighbors(ad, n_pcs=n_pcs, n_neighbors=knn)
sc.tl.louvain(ad, key_added='clusters', resolution=1)
scv.pp.moments(ad, n_pcs=n_pcs, n_neighbors=knn)
scv.tl.velocity(ad)
scv.tl.velocity_graph(ad)
scv.pl.velocity_embedding_stream(ad, basis='umap', color='nover_label')
def adata_preprocess(adata, n_top_genes=1000, log=True):
# this is a lot like the steps for scvelo.pp.filter_and_normalize() which also allows selection of top genes (see Pancreas)
sc.pp.filter_genes(adata, min_counts=1) # only consider genes with more than 1 count#1
# print(adata)
sc.pp.normalize_per_cell( # normalize with total UMI count per cell #same as normalize_total()
adata, key_n_counts='n_counts_all'
)
# select highly-variable genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, flavor='cell_ranger', n_top_genes=n_top_genes, log=False)
adata = adata[:, filter_result.gene_subset] # subset the genes
sc.pp.normalize_per_cell(adata) # renormalize after filtering
if log: sc.pp.log1p(adata) # log transform: adata.X = log(adata.X + 1)
'''
total = adata.X
total = total.sum(axis=0).transpose()
total = pd.DataFrame(total.transpose())
print('total')
print(total.shape)
#total = total.sum(axis=0).transpose()
total.columns = [i for i in adata.var_names]
print(total)
total.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/library_counts_500hvg.csv')
sc.pp.scale(adata, max_value=10)
from sklearn.decomposition import PCA
pca = PCA(n_components=499) # estimate only 2 PCs
X_new = pca.fit_transform(adata.X)
print('variance explained')
print(pca.explained_variance_ratio_)
print('pca.components_ shape ncomp x nfeat')
print()
df = pd.DataFrame(abs(pca.components_))
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print('done saving')
'''
    # sc.pp.scale(adata, max_value=10)  # Zheng scales after the log, but this doesn't work well and is also not used in scvelo.pp.filter_and_normalize
return adata
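# Usage sketch (hypothetical raw counts matrix):
#   adata = adata_preprocess(sc.AnnData(raw_counts), n_top_genes=1000, log=True)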
def main_Human(ncomps=80, knn=30, v0_random_seed=7, run_palantir_func=False):
'''
df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_500hvg.csv')
print(df)
df = df.set_index('Unnamed: 0')
print(df)
df = df.sort_values(by='totals', axis=1, ascending = False)
df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/pca_components_importance_sorted_500hvg.csv')
print('saved')
'''
import random
random.seed(100)
dict_abb = {'Basophils': 'BASO1', 'CD4+ Effector Memory': 'TCEL7', 'Colony Forming Unit-Granulocytes': 'GRAN1',
'Colony Forming Unit-Megakaryocytic': 'MEGA1', 'Colony Forming Unit-Monocytes': 'MONO1',
'Common myeloid progenitors': "CMP", 'Early B cells': "PRE_B2", 'Eosinophils': "EOS2",
'Erythroid_CD34- CD71+ GlyA-': "ERY2", 'Erythroid_CD34- CD71+ GlyA+': "ERY3",
'Erythroid_CD34+ CD71+ GlyA-': "ERY1", 'Erythroid_CD34- CD71lo GlyA+': 'ERY4',
'Granulocyte/monocyte progenitors': "GMP", 'Hematopoietic stem cells_CD133+ CD34dim': "HSC1",
'Hematopoietic stem cells_CD38- CD34+': "HSC2",
'Mature B cells class able to switch': "B_a2", 'Mature B cells class switched': "B_a4",
'Mature NK cells_CD56- CD16- CD3-': "Nka3", 'Monocytes': "MONO2",
'Megakaryocyte/erythroid progenitors': "MEP", 'Myeloid Dendritic Cells': 'mDC (cDC)',
'Naïve B cells': "B_a1",
'Plasmacytoid Dendritic Cells': "pDC", 'Pro B cells': 'PRE_B3'}
# NOTE: Myeloid DCs are now called Conventional Dendritic Cells cDCs
string_ = 'ncomp =' + str(ncomps) + ' knn=' + str(knn) + ' randseed=' + str(v0_random_seed)
# print('ncomp =', ncomps, ' knn=', knn, ' randseed=', v0_random_seed)
print(colored(string_, 'blue'))
nover_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_PredFine_notLogNorm.csv')[
'x'].values.tolist()
nover_labels = [dict_abb[i] for i in nover_labels]
df_nover = pd.DataFrame(nover_labels)
# df_nover.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/noverLabelsforMonocle.csv')
print('save nover')
for i in list(set(nover_labels)):
print('the population of ', i, 'is ', nover_labels.count(i))
parc53_labels = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/Nover_Cor_Parc53_set1.csv')[
'x'].values.tolist()
parclabels_all = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels_all_set1.csv')[
'parc'].values.tolist()
parc_dict_nover = {}
for i, c in enumerate(parc53_labels):
parc_dict_nover[i] = dict_abb[c]
parclabels_all = [parc_dict_nover[ll] for ll in parclabels_all]
# print('all', len(parclabels_all))
ad = sc.read(
'/home/shobi/Trajectory/Datasets/HumanCD34/human_cd34_bm_rep1.h5ad')
# 5780 cells x 14651 genes Human Replicate 1. Male african american, 38 years
print('h5ad ad size', ad)
colors = pd.Series(ad.uns['cluster_colors'])
colors['10'] = '#0b128f'
ct_colors = pd.Series(ad.uns['ct_colors'])
list_var_names = ad.var_names
# print(list_var_names)
ad.uns['iroot'] = np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0]
print('iroot', np.flatnonzero(ad.obs_names == ad.obs['palantir_pseudotime'].idxmin())[0])
tsne = pd.DataFrame(ad.obsm['tsne'], index=ad.obs_names, columns=['x', 'y'])
tsnem = ad.obsm['tsne']
palantir_tsne_df = pd.DataFrame(tsnem)
# palantir_tsne_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/palantir_tsne.csv')
revised_clus = ad.obs['clusters'].values.tolist().copy()
loc_DCs = [i for i in range(5780) if ad.obs['clusters'].values.tolist()[i] == '7']
for loc_i in loc_DCs:
if ad.obsm['palantir_branch_probs'][loc_i, 5] > ad.obsm['palantir_branch_probs'][
loc_i, 2]: # if prob that cDC > pDC, then relabel as cDC
revised_clus[loc_i] = '10'
revised_clus = [int(i) for i in revised_clus]
# magic_df = ad.obsm['MAGIC_imputed_data']
# ad.X: Filtered, normalized and log transformed count matrix
# ad.raw.X: Filtered raw count matrix
# print('before extra filtering' ,ad.shape)
# sc.pp.filter_genes(ad, min_cells=10)
# print('after extra filtering', ad.shape)
adata_counts = sc.AnnData(ad.X)
print(ad.raw.X.shape)
# df_X = pd.DataFrame(ad.raw.X.todense(), columns = ad.var_names)
# df_X.columns = [i for i in ad.var_names]
# print('starting to save .X')
# df_X.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/expression_matrix_raw.csv")
print('finished save .X')
# (ad.X) # ad.X is filtered, lognormalized,scaled// ad.raw.X is the filtered but not pre-processed
adata_counts.obs_names = ad.obs_names
adata_counts.var_names = ad.var_names
adata_counts_raw = sc.AnnData(ad.raw.X)
adata_counts_raw.var_names = [i for i in ad.var_names]
# adata_counts_raw = adata_preprocess(adata_counts_raw, n_top_genes=500, log=True) # when using HVG and no PCA
# sc.tl.pca(adata_counts_raw,svd_solver='arpack', n_comps=ncomps)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
import colorcet as cc
# tsnem = TSNE().fit_transform(adata_counts.obsm['X_pca'])
'''
f, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True)
line = np.linspace(0, 1, len(set(revised_clus)))
for color, group in zip(line, set(revised_clus)):
where = np.where(np.array(revised_clus) == group)[0]
ax1.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend()
ax1.set_title('Palantir Phenograph Labels')
import colorcet as cc
marker = ['x', '+', (5, 0), '>', 'o', (5, 2)]
line_nover = np.linspace(0, 1, len(set(nover_labels)))
col_i = 0
for color, group in zip(line_nover, set(nover_labels)):
where = np.where(np.array(nover_labels) == group)[0]
marker_x = marker[random.randint(0, 5)]
# ax2.scatter(tsnem[where, 0],tsnem[where, 1], label=group, c=plt.cm.nipy_spectral(color), marker = marker_x, alpha=0.5)
ax2.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], marker=marker_x,
alpha=0.5)
col_i = col_i + 1
ax2.legend(fontsize=6)
ax2.set_title('Novershtern Corr. Labels')
line = np.linspace(0, 1, len(set(parclabels_all)))
col_i = 0
for color, group in zip(line, set(parclabels_all)):
where = np.where(np.array(parclabels_all) == group)[0]
ax3.scatter(tsnem[where, 0], tsnem[where, 1], label=group, c=cc.glasbey_dark[col_i], alpha=0.5)
col_i = col_i + 1
ax3.legend()
ax3.set_title('Parc53 Nover Labels')
# plt.show()
'''
'''
plt.figure(figsize=[5, 5])
plt.title('palantir, ncomps = ' + str(ncomps) + ' knn' + str(knn))
for group in set(revised_clus):
loc_group = np.where(np.asarray(revised_clus) == group)[0]
plt.scatter(tsnem[loc_group, 0], tsnem[loc_group, 1], s=5, color=colors[group], label=group)
ax = plt.gca()
ax.set_axis_off()
ax.legend(fontsize=6)
'''
gene_list = [
'ITGAX'] # ['GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA', 'ITGAX', 'IGHD',
# 'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
true_label = nover_labels # revised_clus
root_user = [4823]
print('v0 random seed', v0_random_seed)
# df_temp_write = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:200])
# df_temp_write.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/Human_CD34_200PCA.csv")
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts_raw.X.todense()
print(time.ctime())
print(time.ctime())
v0 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.2,
root_user=root_user, dataset='humanCD34', preserve_disconnected=True, random_seed=v0_random_seed,
do_impute_bool=True, is_coarse=True, pseudotime_threshold_TS=10,
neighboring_terminal_states_threshold=3) # *.4 root=1,
v0.run_VIA()
v0.make_JSON(filename='scRNA_Hema_temp.js')
super_labels = v0.labels
print('starting to save selected genes')
genes_save = ['ITGAX', 'GATA1', 'GATA2', 'ITGA2B', 'CSF1R', 'MPO', 'CD79B', 'SPI1', 'IRF8', 'CD34', 'IL3RA',
'ITGAX', 'IGHD',
'CD27', 'CD14', 'CD22', 'ITGAM', 'CLC', 'MS4A3', 'FCGR3A', 'CSF1R']
df_selected_genes = pd.DataFrame(adata_counts.X, columns=[cc for cc in adata_counts.var_names])
df_selected_genes = df_selected_genes[genes_save]
# df_selected_genes.to_csv("/home/shobi/Trajectory/Datasets/HumanCD34/selected_genes.csv")
df_ = pd.DataFrame(ad.X)
df_.columns = [i for i in ad.var_names]
print('start magic')
gene_list_magic = ['IL3RA', 'IRF8', 'GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B', 'SPI1', 'CD34', 'CSF1R', 'ITGAX']
df_magic = v0.do_impute(df_, magic_steps=3, gene_list=gene_list_magic)
df_magic_cluster = df_magic.copy()
df_magic_cluster['parc'] = v0.labels
df_magic_cluster = df_magic_cluster.groupby('parc', as_index=True).mean()
print('end magic', df_magic.shape)
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v0.draw_piechart_graph(ax, ax1, type_pt='gene', gene_exp=df_magic_cluster['GATA1'].values, title='GATA1')
plt.show()
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v0, np.arange(0, len(true_label)))
draw_trajectory_gams(tsnem, super_clus_ds_PCA_loc, super_labels, super_labels, v0.edgelist_maxout,
v0.x_lazy, v0.alpha_teleport, v0.single_cell_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
sub_terminal_clusters=v0.terminal_clusters,
title_str='Markov Hitting Times (Gams)', ncomp=str(Xin.shape[1]))
plt.show()
print('super labels', set(super_labels))
ad.obs['via0_label'] = [str(i) for i in super_labels]
magic_ad = ad.obsm['MAGIC_imputed_data']
magic_ad = sc.AnnData(magic_ad)
magic_ad.obs_names = ad.obs_names
magic_ad.var_names = ad.var_names
magic_ad.obs['via0_label'] = [str(i) for i in super_labels]
marker_genes = {"ERY": ['GATA1', 'GATA2', 'ITGA2B'], "BCell": ['IGHD', 'CD22'],
"DC": ['IRF8', 'IL3RA', 'IRF4', 'CSF2RA', 'ITGAX'],
"MONO": ['CD14', 'SPI1', 'MPO', 'IL12RB1', 'IL13RA1', 'C3AR1', 'FCGR3A'], 'HSC': ['CD34']}
sc.pl.matrixplot(magic_ad, marker_genes, groupby='via0_label', dendrogram=True)
'''
sc.tl.rank_genes_groups(ad, groupby='via0_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.rank_genes_groups_heatmap(ad, n_genes=10, groupby="via0_label", show_gene_labels=True, use_raw=False)
sc.pl.rank_genes_groups_tracksplot(ad, groupby='via0_label', n_genes = 3) # plot the result
'''
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34', 'GATA1', 'IL3RA']: # ,'SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
plt.show()
super_edges = v0.edgelist_maxout # v0.edgelist
tsi_list = get_loc_terminal_states(v0, Xin)
v1 = VIA(Xin, true_label, jac_std_global=0.15, dist_std_local=1, knn=knn,
too_big_factor=0.05, super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user,
x_lazy=0.95, alpha_teleport=0.99, dataset='humanCD34', preserve_disconnected=True,
super_terminal_clusters=v0.terminal_clusters, is_coarse=False, full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned,
random_seed=v0_random_seed, pseudotime_threshold_TS=10) # *.4super_terminal_cells = tsi_list #3root=1,
v1.run_VIA()
labels = v1.labels
v1.make_JSON(filename='scRNA_Hema_via1_temp.js')
df_magic_cluster = df_magic.copy()
df_magic_cluster['via1'] = v1.labels
df_magic_cluster = df_magic_cluster.groupby('via1', as_index=True).mean()
# print('df_magic_cluster', df_magic_cluster)
'''
#Get the clustsergraph gene expression on topology
for gene_i in gene_list_magic:
f, ((ax, ax1)) = plt.subplots(1, 2, sharey=True)
v1.draw_piechart_graph(ax,ax1,type_pt='gene', gene_exp = df_magic_cluster[gene_i].values, title = gene_i)
plt.show()
'''
ad.obs['parc1_label'] = [str(i) for i in labels]
'''
tsi_list = [] # find the single-cell which is nearest to the average-location of a terminal cluster
for tsi in v1.revised_super_terminal_clusters:
loc_i = np.where(super_labels == tsi)[0]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = v0.knn_struct.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
sc.tl.rank_genes_groups(ad, groupby='parc1_label', use_raw=True,
method='wilcoxon', n_genes=10) # compute differential expression
sc.pl.matrixplot(ad, marker_genes, groupby='parc1_label', use_raw=False)
sc.pl.rank_genes_groups_heatmap(ad, n_genes=3, groupby="parc1_label", show_gene_labels=True, use_raw=False)
'''
label_df = pd.DataFrame(labels, columns=['parc'])
# label_df.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/parclabels.csv', index=False)
gene_ids = adata_counts.var_names
obs = ad.raw.X.toarray()
print('shape obs', obs.shape)
obs = pd.DataFrame(obs, columns=gene_ids)
# obs['parc']=v1.labels
obs['louvain'] = revised_clus
# obs_average = obs.groupby('parc', as_index=True).mean()
obs_average = obs.groupby('louvain', as_index=True).mean()
# print(obs_average.head())
# obs_average.to_csv('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.csv', index=False)
ad_obs = sc.AnnData(obs_average)
ad_obs.var_names = gene_ids
    ad_obs.obs['parc'] = [i for i in range(len(set(revised_clus)))]  # v1.labels instead of revised_clus
# sc.write('/home/shobi/Trajectory/Datasets/HumanCD34/louvain_palantir_average.h5ad',ad_obs)
# fig_0, ax_0 = plt.subplots()
loaded_magic_df = pd.read_csv('/home/shobi/Trajectory/Datasets/HumanCD34/MAGIC_palantir_knn30ncomp100_subset.csv')
# loaded_magic_df.head()
for gene_name in ['ITGA2B', 'IL3RA',
'IRF8',
'MPO', 'CSF1R', 'GATA2', 'CD79B',
'CD34']: # ['GATA1', 'GATA2', 'ITGA2B', 'MPO', 'CD79B','IRF8','SPI1', 'CD34','CSF1R','IL3RA','IRF4', 'CSF2RA','ITGAX']:
print('gene name', gene_name)
# DC markers https://www.cell.com/pb-assets/products/nucleus/nucleus-phagocytes/rnd-systems-dendritic-cells-br.pdf
gene_name_dict = {'GATA1': 'GATA1', 'GATA2': 'GATA2', 'ITGA2B': 'CD41 (Mega)', 'MPO': 'MPO (Mono)',
'CD79B': 'CD79B (B)', 'IRF8': 'IRF8 (DC)', 'SPI1': 'PU.1', 'CD34': 'CD34',
'CSF1R': 'CSF1R (cDC Up. Up then Down in pDC)', 'IL3RA': 'CD123 (pDC)', 'IRF4': 'IRF4 (pDC)',
'ITGAX': 'ITGAX (cDCs)', 'CSF2RA': 'CSF2RA (cDC)'}
loc_gata = np.where(np.asarray(ad.var_names) == gene_name)[0][0]
magic_ad = ad.obsm['MAGIC_imputed_data'][:, loc_gata]
magic_ad = loaded_magic_df[gene_name]
# subset_ = magic_ad
subset_ = df_magic[gene_name].values
print(subset_.shape)
# print('shapes of magic_ad 1 and 2', magic_ad.shape,subset_.shape)
# v1.get_gene_expression(magic_ad,title_gene = gene_name_dict[gene_name])
v1.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name])
# v0.get_gene_expression(subset_, title_gene=gene_name_dict[gene_name] + 'VIA MAGIC')
print('start tsne')
n_downsample = 4000
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=4000)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=5780, replace=False, p=None)
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
print('labels p1', len(labels), set(labels))
true_label = list(np.asarray(true_label)[idx])
sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# graph_hnsw = v0.knngraph_visual()
embedding = tsnem[idx, :] # TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
# phate_op = phate.PHATE()
# embedding = phate_op.fit_transform(adata_counts.obsm['X_pca'][:, 0:20])
# embedding = embedding[idx, :]
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:20])
print('size of downsampled embedding', embedding.shape)
else:
embedding = tsnem # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][:,0:20])
idx = np.random.randint(len(labels), size=len(labels))
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
# DRAW EVOLUTION PATHS
knn_hnsw = make_knn_embeddedspace(embedding)
draw_sc_evolution_trajectory_dijkstra(v1, embedding, knn_hnsw, v0.full_graph_shortpath, idx)
plt.show()
def main_Toy_comparisons(ncomps=10, knn=30, random_seed=42, dataset='Toy3', root_user='M1',
foldername="/home/shobi/Trajectory/Datasets/Toy3/"):
print('dataset, ncomps, knn, seed', dataset, ncomps, knn, random_seed)
# root_user = ["T1_M1", "T2_M1"] # "M1" # #'M1' # "T1_M1", "T2_M1"] #"T1_M1"
if dataset == "Toy3":
print('dataset Toy3')
df_counts = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000.csv",
delimiter=",")
#df_counts = pd.read_csv(foldername + "Toy3_noise_100genes_thinfactor8.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_multifurcating_M8_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy3/toy_multifurcating_M8_n1000d1000.csv")
print('df_ids', df_ids.columns)
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C107'
if dataset == "Toy4": # 2 disconnected components
df_counts = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "Toy4_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "toy_disconnected_M9_n1000d1000_ids_with_truetime.csv", delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/Toy4/toy_disconnected_M9_n1000d1000.csv")
print(df_counts.shape, 'df_counts shape')
root_user = ['T1_M1', 'T2_M1'] # 'T1_M1'
paga_root = 'T2_M1'
palantir_root = 'C107'
if dataset == "Connected":
df_counts = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000.csv", delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "ToyConnected_M9_n2000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected/ToyConnected_M9_n2000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1'
if dataset == "Connected2":
df_counts = pd.read_csv(foldername + "Connected2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyConnected2_noise_500genes.csv", 'rt',delimiter=",")
df_ids = pd.read_csv(foldername + "Connected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C11'
    # suggest using visual Jaccard pruning of 1 (this doesn't alter the underlying graph, just the display); can also use "M2" as the starting root
if dataset == "ToyMultiM11":
df_counts = pd.read_csv(foldername + "Toymulti_M11_n3000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv(foldername + "ToyMulti_M11_noised.csv", delimiter=",")
df_ids = pd.read_csv(foldername + "Toymulti_M11_n3000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv( "/home/shobi/Trajectory/Datasets/ToyMultifurcating_M11/Toymulti_M11_n3000d1000.csv")
root_user = ['M1']
paga_root = "M1"
palantir_root = 'C1005'
if dataset == "Cyclic": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_noise_100genes_thinfactor3.csv",
delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic/ToyCyclic_M4_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1'
if dataset == "Cyclic2": # 4 milestones
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv",
delimiter=",")
# df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/ToyCyclic2_noise_500genes.csv", delimiter=",")
df_ids = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyCyclic2/Cyclic2_n1000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C107'
if dataset == 'Bifurc2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/ToyBifurc2_noised.csv", delimiter=",")
df_ids = pd.read_csv( "/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000_ids_with_truetime.csv",delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyBifurcating2/Bifurc2_M4_n2000d1000.csv")
root_user = ['M1'] # 'T1_M1'
paga_root = 'M1'
palantir_root = 'C1006'
if dataset == 'Disconnected2':
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv",
delimiter=",")
df_counts = pd.read_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/ToyDisconnected2_noise_500genes.csv",
delimiter=",")
df_ids = pd.read_csv(
"/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000_ids_with_truetime.csv",
delimiter=",")
#counts = palantir.io.from_csv("/home/shobi/Trajectory/Datasets/ToyDisconnected2/Disconnected2_n1000d1000.csv")
root_user = ['T1_M1', 'T1_M2', 'T1_M3'] # 'T1_M1'
paga_root = 'T1_M1'
palantir_root = 'C125'
df_ids['cell_id_num'] = [int(s[1::]) for s in df_ids['cell_id']]
print("shape", df_counts.shape, df_ids.shape)
df_counts = df_counts.drop('Unnamed: 0', 1)
df_ids = df_ids.sort_values(by=['cell_id_num'])
df_ids = df_ids.reset_index(drop=True)
# df_ids.to_csv("/home/shobi/Trajectory/Datasets/ToyConnected2/Connected2_n1000d1000_ids_sorted_with_truetime.csv")
# df_counts['group_id'] = df_ids['group_id']#to split Toy4
# df_counts['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_ids['main_Traj'] = [i[0:2] for i in df_ids['group_id']]#to split Toy4
# df_counts = df_counts[df_counts['main_Traj']=='T2']#to split Toy4
# df_ids = df_ids[df_ids['main_Traj'] == 'T2']#to split Toy4
#true_time = df_ids['true_time']
true_label = df_ids['group_id'].tolist()
# df_counts = df_counts.drop('main_Traj', 1)#to split Toy4
# df_counts = df_counts.drop('group_id', 1)#to split Toy4
# df_ids = df_ids.reset_index(drop=True)#to split Toy4
# df_counts = df_counts.reset_index(drop=True)#to split Toy4
# true_label = df_ids['group_id'] #to split Toy4
print("shape", df_counts.index, df_ids.index)
adata_counts = sc.AnnData(df_counts, obs=df_ids)
sc.tl.pca(adata_counts, svd_solver='arpack', n_comps=ncomps)
# comparisons
adata_counts.uns['iroot'] = np.flatnonzero(adata_counts.obs['group_id'] == paga_root)[0] # 'T1_M1'#'M1'
do_paga = False #
do_palantir = False #
# comparisons
if do_paga == True:
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X', ) # n_pcs=ncomps) # 4
sc.tl.draw_graph(adata_counts)
# sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data') # force-directed layout
start_dfmap = time.time()
# sc.tl.diffmap(adata_counts, n_comps=ncomps)
sc.tl.diffmap(adata_counts, n_comps=200) # default retains n_comps = 15
print('time taken to get diffmap given knn', time.time() - start_dfmap)
sc.pp.neighbors(adata_counts, n_neighbors=knn, use_rep='X_diffmap') # 4
sc.tl.draw_graph(adata_counts)
sc.pl.draw_graph(adata_counts, color='group_id', legend_loc='on data')
sc.tl.leiden(adata_counts, resolution=1.0, random_state=10)
sc.tl.paga(adata_counts, groups='leiden')
# sc.pl.paga(adata_counts, color=['leiden','group_id'])
sc.tl.dpt(adata_counts, n_dcs=ncomps)
df_paga = pd.DataFrame()
df_paga['paga_dpt'] = adata_counts.obs['dpt_pseudotime'].values
correlation = df_paga['paga_dpt'].corr(df_ids['true_time'])
print('corr paga knn', knn, correlation)
sc.pl.paga(adata_counts, color=['leiden', 'group_id', 'dpt_pseudotime'],
title=['leiden (knn:' + str(knn) + ' ncomps:' + str(ncomps) + ')',
'group_id (ncomps:' + str(ncomps) + ')', 'pseudotime (ncomps:' + str(ncomps) + ')'])
# X = df_counts.values
'''
# palantir
if do_palantir == True:
print(palantir.__file__) # location of palantir source code
str_true_label = true_label.tolist()
str_true_label = [(i[1:]) for i in str_true_label]
str_true_label = pd.Series(str_true_label, index=counts.index)
norm_df = counts # palantir.preprocess.normalize_counts(counts)
# pca_projections, _ = palantir.utils.run_pca(norm_df, n_components=ncomps) #normally use
pca_projections = counts
dm_res = palantir.utils.run_diffusion_maps(pca_projections, knn=knn,
n_components=300) ## n_components=ncomps, knn=knn)
ms_data = palantir.utils.determine_multiscale_space(dm_res) # n_eigs is determined using eigengap
tsne = palantir.utils.run_tsne(ms_data)
palantir.plot.plot_cell_clusters(tsne, str_true_label)
# C108 for M12 connected' #M8n1000d1000 start - c107 #c1001 for bifurc n2000d1000 #disconnected n1000 c108, "C1 for M10 connected" # c10 for bifurcating_m4_n2000d1000
# c107 for T1_M1, C42 for T2_M1 disconnected
# C1 for M8_connected, C1005 for multi_M11 , 'C1006 for bifurc2'
pr_res = palantir.core.run_palantir(ms_data, early_cell=palantir_root, num_waypoints=500, knn=knn)
df_palantir = pd.read_csv(
'/home/shobi/Trajectory/Datasets/Toy3/palantir_pt.csv') # /home/shobi/anaconda3/envs/ViaEnv/lib/python3.7/site-packages/palantir
pt = df_palantir['pt']
correlation = pt.corr(true_time)
print('corr Palantir', correlation)
print('')
palantir.plot.plot_palantir_results(pr_res, tsne, n_knn=knn, n_comps=pca_projections.shape[1])
plt.show()
'''
# from sklearn.decomposition import PCA
# pca = PCA(n_components=ncomps)
# pc = pca.fit_transform(df_counts)
Xin = adata_counts.obsm['X_pca'][:, 0:ncomps]
# Xin = adata_counts.X
if dataset == 'Toy4':
jac_std_global = .15 # .15
else:
    jac_std_global = 0.15  # use 1 for cyclic2, otherwise 0.15
#
v0 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.3, root_user=root_user, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
random_seed=random_seed) # *.4 root=2,
v0.run_VIA()
super_labels = v0.labels
df_ids['pt'] = v0.single_cell_pt_markov
correlation = df_ids['pt'].corr(df_ids['true_time'])
print('corr via knn', knn, correlation)
super_edges = v0.edgelist
# v0.make_JSON(filename = 'Toy3_ViaOut_temp.js')
p = hnswlib.Index(space='l2', dim=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[1])
p.init_index(max_elements=adata_counts.obsm['X_pca'][:, 0:ncomps].shape[0], ef_construction=200, M=16)
p.add_items(adata_counts.obsm['X_pca'][:, 0:ncomps])
p.set_ef(50)
tsi_list = []  # for each terminal cluster, find the single cell nearest (in PCA space) to the cluster's average location
for tsi in v0.terminal_clusters:
loc_i = np.where(np.asarray(v0.labels) == tsi)[0]
val_pt = [v0.single_cell_pt_markov[i] for i in loc_i]
th_pt = np.percentile(val_pt, 50) # 50
loc_i = [loc_i[i] for i in range(len(val_pt)) if val_pt[i] >= th_pt]
temp = np.mean(adata_counts.obsm['X_pca'][:, 0:ncomps][loc_i], axis=0)
labelsq, distances = p.knn_query(temp, k=1)
tsi_list.append(labelsq[0][0])
print('Granular VIA iteration')
v1 = VIA(Xin, true_label, jac_std_global=jac_std_global, dist_std_local=1,
knn=knn,
too_big_factor=0.1,
super_cluster_labels=super_labels, super_node_degree_list=v0.node_degree_list,
super_terminal_cells=tsi_list, root_user=root_user, is_coarse=False,
x_lazy=0.95, alpha_teleport=0.99, preserve_disconnected=True, dataset='toy',
visual_cluster_graph_pruning=1, max_visual_outgoing_edges=2,
super_terminal_clusters=v0.terminal_clusters,
full_neighbor_array=v0.full_neighbor_array,
ig_full_graph=v0.ig_full_graph, full_distance_array=v0.full_distance_array,
csr_array_locally_pruned=v0.csr_array_locally_pruned, random_seed=random_seed) # root=1,
v1.run_VIA()
df_ids['pt1'] = v1.single_cell_pt_markov
correlation = df_ids['pt1'].corr(df_ids['true_time'])
print('corr via1 knn', knn, correlation)
labels = v1.labels
# v1 = PARC(adata_counts.obsm['X_pca'], true_label, jac_std_global=1, knn=5, too_big_factor=0.05, anndata= adata_counts, small_pop=2)
# v1.run_VIA()
# labels = v1.labels
print('start tsne')
n_downsample = 500
if len(labels) > n_downsample:
# idx = np.random.randint(len(labels), size=900)
np.random.seed(2357)
idx = np.random.choice(a=np.arange(0, len(labels)), size=len(labels), replace=False, p=None)
print('len idx', len(idx))
super_labels = np.asarray(super_labels)[idx]
labels = list(np.asarray(labels)[idx])
true_label = list(np.asarray(true_label)[idx])
    sc_pt_markov = list(np.asarray(v1.single_cell_pt_markov)[idx])
# embedding = v0.run_umap_hnsw(adata_counts.obsm['X_pca'][idx, :], graph)
embedding = adata_counts.obsm['X_pca'][idx,
0:2] # umap.UMAP().fit_transform(adata_counts.obsm['X_pca'][idx, 0:5])
# embedding = TSNE().fit_transform(adata_counts.obsm['X_pca'][idx, :])
print('tsne downsampled size', embedding.shape)
else:
embedding = umap.UMAP().fit_transform(Xin) # (adata_counts.obsm['X_pca'])
print('tsne input size', adata_counts.obsm['X_pca'].shape)
# embedding = umap.UMAP().fit_transform(adata_counts.obsm['X_pca'])
    idx = np.random.randint(len(labels), size=len(labels))
    sc_pt_markov = v1.single_cell_pt_markov  # needed downstream; mirrors the downsampled branch above
print('end tsne')
super_clus_ds_PCA_loc = sc_loc_ofsuperCluster_PCAspace(v0, v1, idx)
print('super terminal and sub terminal', v0.super_terminal_cells, v1.terminal_clusters)
knn_hnsw, ci_list = sc_loc_ofsuperCluster_embeddedspace(embedding, v0, v1, idx)
draw_trajectory_gams(embedding, super_clus_ds_PCA_loc, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v1.revised_super_terminal_clusters,
sub_terminal_clusters=v1.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
plt.show()
'''
draw_trajectory_dimred(embedding, ci_list, labels, super_labels, super_edges,
v1.x_lazy, v1.alpha_teleport, sc_pt_markov, true_label, knn=v0.knn,
final_super_terminal=v0.terminal_clusters,
title_str='Hitting times: Markov Simulation on biased edges', ncomp=ncomps)
'''
plt.show()
num_group = len(set(true_label))
line = np.linspace(0, 1, num_group)
f, (ax1, ax3) = plt.subplots(1, 2, sharey=True)
for color, group in zip(line, set(true_label)):
where = np.where(np.asarray(true_label) == group)[0]
ax1.scatter(embedding[where, 0], embedding[where, 1], label=group,
c=np.asarray(plt.cm.jet(color)).reshape(-1, 4))
ax1.legend(fontsize=6)
ax1.set_title('true labels')
ax3.set_title("Markov Sim PT ncomps:" + str(Xin.shape[1]) + '. knn:' + str(knn))
ax3.scatter(embedding[:, 0], embedding[:, 1], c=sc_pt_markov, cmap='viridis_r')
plt.show()
df_subset = pd.DataFrame(adata_counts.obsm['X_pca'][:, 0:5], columns=['Gene0', 'Gene1', 'Gene2', 'Gene3', 'Gene4'])
import pandas as pd
import numpy as np
from textblob import TextBlob
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
# File Names
TRAIN_TIMESERIES = 'input_csv_files/train_timeseries2.csv'
TEST_TIMESERIES = 'input_csv_files/test_timeseries2.csv'
TRAIN_TWEETER = 'input_csv_files/train_tweets.csv'
TEST_TWEETER = 'input_csv_files/test_tweets.csv'
SUB_FILE_NAME = 'output_files/SUBMISSION_GB_HUBER_FMSE_32.csv'
# Model Selection
model = ['r', 'l', 'g', 's']
# GB Parameters
LOSS = 'huber'
CRITERION = 'friedman_mse'
MAX_DEPTH = 32
# Configuration
ENABLE_TWEET = True
columns = ['close',
# 'blocks_size',
# 'cost_per_transaction',
# 'difficulty',
# 'est_transaction_volume_USD',
# 'miners_revenue',
# 'total_bitcoins',
# 'transaction_fees_USD',
# 'mempool_count_max',
# 'wallets_created',
'cost_per_transaction',
'est_transaction_volume_USD',
'est_transaction_volume',
'cost_per_transaction_percent' ,
'transaction_fees',
'n_transactions_per_block',
'utxo_count_max' ,
'utxo_count_min' ,
'mempool_size_max',
]
columns = list(set(columns))
# Read Files
data = pd.read_csv(TRAIN_TIMESERIES, index_col=0)
data = data[columns]
# print(data.head())
# Tweet Data file processing ::::::::::::::::::::::::::::::::::::::::::::::::::::
def process_tweet_data(file):
tweet_columns = ['text', 'retweet_count', 'favorite_count', 'follower_count', 'account']
# dtypes = {'created_date': 'datetime', 'text': 'str', 'retweet_count': 'int','follower_count':'int','account':'str'}
# parse_dates = ['created_date']
# tweet_data = pd.read_csv(TRAIN_TWEETER, parse_dates=parse_dates, encoding="iso-8859-1")
tweet_data = pd.read_csv(file, index_col=1, encoding="iso-8859-1")
tweet_data = tweet_data[tweet_columns]
# print(tweet_data.head())
# tweet_data.rename(index = {'created_date': 'date'}, inplace = True)
tweet_data.index.names = ['date']
def sentiment_calc(text):
try:
return TextBlob(text).sentiment
except:
return None
tweet_data['sentiment'] = tweet_data['text'].apply(sentiment_calc)
def sentiment_split1(x):
try:
return x[0]
except:
return None
def sentiment_split2(x):
try:
return x[1]
except:
return None
tweet_data['pol'] = tweet_data['sentiment'].apply(sentiment_split1)
tweet_data['sub'] = tweet_data['sentiment'].apply(sentiment_split2)
# tweet_data['date'] = tweet_data.index
# tweet_data = tweet_data.drop(tweet_data[tweet_data.created_date == '5'].index)
# def convert_date(x):
# try:
# return x.date()
# except (ValueError, TypeError):
# return None
# print(tweet_data.head())
# tweet_data['date'] = tweet_data['created_date'].apply(convert_date)
clean_tw_data = tweet_data[['retweet_count','favorite_count', 'follower_count', 'account', 'pol', 'sub']]
# clean_tw_data.set_index('date')
    clean_tw_data = clean_tw_data.drop(columns=['account', 'favorite_count'])
return clean_tw_data
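# Illustrative sketch (not part of the original script): TextBlob's .sentiment is a
# (polarity, subjectivity) namedtuple, which is why sentiment_split1/2 above index positions 0 and 1.
def _example_textblob_sentiment(text="Bitcoin is doing great today"):
    # polarity lies in [-1, 1], subjectivity in [0, 1]
    sentiment = TextBlob(text).sentiment
    return sentiment.polarity, sentiment.subjectivity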
# -----------------------------------------------------------------------------------------
train_tweet_data = process_tweet_data(TRAIN_TWEETER)
test_tweet_data = process_tweet_data(TEST_TWEETER)
# print(train_tweet_data.head())
# Merge Tweet Data with Timeseries Data
data_close = data[columns]
# print(data_close.head())
data_w_tweets = train_tweet_data.merge(data_close, left_index=True, right_index=True)
# print(data_w_tweets.head())
if ENABLE_TWEET:
data = data_w_tweets
print(len(data))
# data[data.isnull().any(axis=1)]
data = data.dropna(how='any')
# Split Training data
data_train = data.sample(frac=0.8, random_state=0)
data_test = data.drop(data_train.index)
# train_stat = data_train.describe()
# train_stat.pop('close')
# train_stat = train_stat.transpose()
y_train = data_train.pop("close")
y_test = data_test.pop("close")
# sc = StandardScaler()
# normed_X_train = sc.fit_transform(data_train)
# normed_X_test = sc.transform(data_test)
normed_X_train = data_train
normed_X_test = data_test
# Model Fitting
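# 'huber' loss blends squared and absolute error, which makes the boosted trees less sensitive to
# outlier prices; 'friedman_mse' is the split-quality criterion used when growing each tree.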
reg = GradientBoostingRegressor(loss=LOSS, criterion = CRITERION, max_depth=MAX_DEPTH)
reg.fit(normed_X_train, y_train)
y_pred = reg.predict(normed_X_test)
y_pred = y_pred.round(decimals=2)
score = reg.score(normed_X_test, y_test)
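# reg.score() is the coefficient of determination (R^2) on the held-out split.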
print(score)
y_pred_df = pd.DataFrame(y_pred, columns=['close'], index=y_test.index)
# Prepare Submission File
columns.remove('close')
data_sub = pd.read_csv(TEST_TIMESERIES, index_col=0)
data_sub = data_sub[columns]
data_close_sub = data_sub[columns]
data_w_tweets_sub = test_tweet_data.merge(data_close_sub, left_index=True, right_index=True)
# Enable Tweet Data
if ENABLE_TWEET:
data_sub = data_w_tweets_sub
data_sub = data_sub.dropna(how='any')
# normalized_sub = sc.transform(data_sub)
normalized_sub = data_sub
# Submission Prediction
y_sub = reg.predict(normalized_sub)
y_sub = y_sub.round(decimals=2)
y_sub_df = pd.DataFrame(y_sub, columns=['close'], index=data_sub.index)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 17:14:55 2021
@author: sergiomarconi
"""
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import normalize
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
import category_encoders as ce
species_to_genus = {
'2PLANT':"NA",
'ABBA':"AB",
'ABLAL':"AB",
"ABLO":'AB',
"ABMA":"AB",
"ABAM":"AB",
'ACNE2': "AC",
'ACNEN': "AC",
'ACPE':"AC",
'ACRU': "AC",
'ACSA3' : "AC",
'ACSA2' :"AC",
'ACFA':"AC2",
'ACKO': "AC2",
'ACGR':"AC2",
'AIAL' : "AI",
'ALRU2': "AL",
'ALVI5':'AL',
'AMLA' : "AM",
'AMEL':'AM',
'ARVIM':"AR",
'BEAL2': "BE",
'BEGL/BENA':"BE",
'BEPA': "BE",
'BELE': "BE",
'BEPO': "BE",
'BETUL':'BE',
'BENE4' : "BE",
'BUBU':"BU",
'BUSI':"BU",
'BOSU2':"BO",
'CACA18':"CA1",
'CADE27':"CA2",
'CAGL8':"CA3",
'CAOV2':"CA3",
'CAOV3':"CA3",
'CAAQ2':'CA',
'CACO15': "CA3",
'CATO6':"CA3",
'CAIL2':"CA3",
'CECA4':"CE1",
'CELA':"CE2",
'CEOC':"CE2",
'CODR':"CO",
'CODI8':"CO",
'COFL2':"CO2",
'DIVI5':"DI",
'ELAN':"EL",
'FAGR':"FA",
'FRAM2':"FR",
'FRAXI':'FR',
'FRNI':'FR',
'LARIX':'LA',
'ILAN':'IL',
'FRPE':"FR",
'GYDI':"GY",
'GUOF':"GU",
'GUSA':"GU",
'GLTR':"GL",
'HALES':"HA",
'JUNI':"JU1",
'JUNIP':"JU2",
'JUVI':"JU2",
'JUOS':"JU2",
'LIST2':"LI1",
'LITU':"LI2",
'MAPO':"MA",
'MAFR':'MA',
'MAGNO':'MA',
'MORU2':"MO",
'NYBI':"NY",
'NYSY':"NY",
'NYAQ2':'NY',
'OXYDE':"OX",
'OXAR':"OX",
'OSVI':'OS',
'PICEA':"PI1",
'PIAL3':"PI2",
'PIAC':"PI3",
'PICO':"PI2",
'PIEL':"PI2",
'PIEN':"PI2",
'PIEC2':"PI2",
'PIFL2':"PI2",
'PIGL':"PI2",
'PIMA':"PI2",
'PINUS':'PI2',
'PIPA2':"PI2",
'PIPO':"PI2",
'PIRU':"PI2",
'PIPOS':"PI2",
'PIPU5':"PI2",
'PIST':"PI2",
'PITA':"PI2",
'PIGL2':"PI2",
'PIED':"PI",
'PIJE':"PI",
'PIRI':'PI',
'PIVI2':'PI',
'PLOC':"PL",
'POTR5':"PO",
'POGR4':"PO",
'PODE3':"PO",
'PRVE':"PR",
'PRVI':"PR",
'PRAV':'PR',
'PRSE2': "PR",
'PRAN3':"PR",
'PSME':"PS",
'QUAL':"QU",
'QUCO2':"QU",
'QUCH':"QU",
'QUCH2':"QU",
'QUHE2':'QU',
'QUERC':"QU",
'QUGE2':"QU",
'QUSH':"QU",
'QULA2':'QU',
"QUPH":"QU",
'QULA3':"QU",
'QUERCUS':"QU",
'QULY':"QU",
'QUMA3':"QU",
'QUMA13':"QU",
'THUJA':"TU",
'PISA2':"PI2",
'TABR2':"TA",
'QUDO':"QU",
'MEPO5':'ME',
'QUMI':"QU",
'QUFA':"QU",
'QUMO4':"QU",
'QUMU':"QU",
'QUNI':"QU",
'QUKE':"QU",
'QUVE':'QU',
'QUWI2':"QU",
'QUPA5':"QU",
'QURU':"QU",
'QUST':"QU",
'RHGL':"RH",
"ROPS":"RO",
'SASSA':'SA',
'SALIX':'SA',
'SYOC':"SY",
'SILA20':"SI",
'SWMA2':"SW",
'TRSE6':"TR",
'TSCA':"TS",
'TSHE':"TS",
'TIAM':"TI",
'TAHE':"TA",
'ULAL':"UL",
'ULAM':"UL",
'ULMUS':"UL",
'ULCR':"UL",
'ULRU':"UL",
}
import os
os.chdir("/blue/ewhite/s.marconi/NeonSpeciesClassification/")
from sklearn.preprocessing import LabelEncoder
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from imblearn.under_sampling import TomekLinks
from collections import Counter
from src.hdr import *
from sklearn.preprocessing import normalize
import numpy as np
import pandas as pd
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import BaggingClassifier
from mlxtend.classifier import StackingClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingClassifier
from mlxtend.classifier import SoftmaxRegression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from imblearn.ensemble import BalancedRandomForestClassifier
from imblearn.ensemble import RUSBoostClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
domainid = {
"GUAN": "D04",
"BART": "D01",
"HARV": "D01",
"STEI": "D05",
"TREE": "D05",
"UNDE": "D05",
"SERC": "D02",
"SCBI": "D02",
"OSBS": "D03",
"MLBS": "D07",
"DELA": "D08",
"TALL": "D08",
"BLAN": "D02",
"UKFS": "D06",
"RMNP": "D10",
"BONA": "D19",
"MOAB": "D13",
"DEJU": "D19",
"LENO": "D08",
"DSNY": "D03",
"JERC": "D03",
"KONZ": "D06",
"CLBJ": "D11",
"YELL": "D12",
"NIWO": "D13",
"ABBY": "D16",
"WREF": "D16",
"SJER": "D17",
"PUUM": "D20"
}
def categorical_encoder(cats,y):
import category_encoders as ce
le = LabelEncoder()
le.fit(y)
le = le.transform(y)
enc = ce.LeaveOneOutEncoder(cols=['siteID'])
# enc = enc.fit(cats).transform(cats)
train_enc = enc.fit_transform(cats,le)
return(train_enc)
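# Minimal usage sketch (illustrative only; the site and taxon codes below are just sample values):
# leave-one-out encoding replaces each 'siteID' with the mean label code of all *other* rows
# sharing that site, which is what categorical_encoder() returns for the real metadata.
def _example_categorical_encoder():
    cats = pd.DataFrame({'siteID': ['OSBS', 'OSBS', 'HARV', 'HARV']})
    y = pd.Series(['QUAL', 'QULA2', 'ACRU', 'ACSA3'])
    return categorical_encoder(cats, y)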
# prepare input data
def prepare_inputs(X_train, X_test, cats = ['domainID', 'siteID']):
X_train_enc, X_test_enc = list(), list()
# label encode each column
for i in cats:
le = LabelEncoder()
le.fit(X_train[i])
# encode
train_enc = le.transform(X_train[i])
test_enc = le.transform(X_test[i])
# store
X_train_enc.append(train_enc)
X_test_enc.append(test_enc)
return X_train_enc, X_test_enc
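# Note: X_train and y_train are assumed to come from an upstream train/test split of the feature
# table and its taxonID labels; the block below duplicates classes that have a single sample,
# presumably so that stratified resampling downstream does not fail on them.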
min_class = Counter(y_train.taxonID)
unsuited = pd.DataFrame(min_class.items())
only_one = unsuited.iloc[:, 1] < 2
unsuited = unsuited[only_one][0]
# unsuited = pd.DataFrame(min_class.items())
doble_unsuited = X_train[y_train.taxonID.isin(unsuited)]
X_train = X_train.append(doble_unsuited)
doble_unsuited = y_train[y_train.taxonID.isin(unsuited)]
y_train = y_train.append(doble_unsuited)
min_class = Counter(y_train.taxonID)
min_class = min_class[min(min_class, key=min_class.get)]
min_class
# dimensionality reduction
def kld_reduction(brick, kld_out):
from sklearn import preprocessing
refl = brick.drop(['individualID'], axis=1)
scaler = preprocessing.StandardScaler().fit(refl)
refl = scaler.transform(refl)
kld_groups = getClusters(refl, numBands = 15)
np.savetxt(kld_out, kld_groups, delimiter=",")
individualID=brick["individualID"]
#
brick = brick.drop(columns=['individualID'])
brick = brick.values
all_data = np.zeros([brick.shape[0],1])
for jj in np.unique(kld_groups):
which_bands = kld_groups == jj
#min
new_col = np.apply_along_axis(min, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#mean
new_col = np.apply_along_axis(np.mean, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#max
new_col = np.apply_along_axis(max, 1, brick[:,which_bands])[...,None]
all_data = np.append(all_data, new_col, 1)
#reappend individualID on the all data dataframe
all_data = pd.DataFrame(all_data)
all_data["individualID"] = individualID
all_data = all_data.drop([0], axis=1)
#
#shift back individualID to first row
cols = list(all_data.columns)
cols = [cols[-1]] + cols[:-1]
all_data = all_data[cols]
return all_data
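# kld_reduction() returns one 'individualID' column plus three summary features
# (min, mean, max of the reflectance) for each KLD band group, i.e. 3 * n_groups features per crown.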
brick = pd.read_csv("./data/features_0411.csv") #"./data/brdf_spectra_2104b.csv")
metadata = pd.read_csv("./data/metadata_0411.csv") #"./data/metadata_2104b.csv")
metadata = metadata[["individualID", "groupID", "plotID","siteID","elevation","latitude", "longitude",
"taxonID"]]
kld_out="./data/tmp_grps.csv"#"./data/kld_grps_2104b.csv"
nbands = brick.shape[1]  # number of columns: individualID plus the spectral bands
brick.iloc[:, 1:nbands] = normalize(brick.iloc[:, 1:nbands])
brick = kld_reduction(brick, kld_out)
foo = brick.drop(columns=[ 'individualID'])
ele1 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_all_df_vst.csv")
ele2 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_df_vst.csv")
ele3 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR//elevation_sp_3.csv")
ele4 = pd.read_csv("/blue/ewhite/s.marconi/Chapter3/neonVegWrangleR/elevation_sp_2.csv")
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
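# Usage note: later in this module the helper above is partially applied (via functools.partial)
# with the window start date and the full sid list, so each call only supplies the
# (sid, estimate, knowledge_date) tuples and an end date; each tuple is forward-filled from its
# knowledge date through that end date to build the expected window snapshot.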
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
    test_load_one_day()
        Tests that running the Pipeline over a single day returns the
        expected values for each of the dataset's columns.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the previous quarter loader returns the most recently released
    quarter's values when the pipeline is run over a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
    Tests that the next quarter loader returns the upcoming quarter's
    estimates when the pipeline is run over a single day.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
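        # permutations of the 8 knowledge dates taken 4 at a time gives 1680 candidate orderings;
        # only those satisfying the constraints below contribute estimate rows (the enumerate
        # index supplies the sid).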
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-20')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-01-22')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, 310, pd.Timestamp('2015-01-09')),
(10, 311, pd.Timestamp('2015-01-15')),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-02-05')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
end_date
) for end_date in pd.date_range('2015-02-06', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(0, 201, pd.Timestamp('2015-02-10')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-02-10')
)
])
twoq_next = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-11')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-16')] +
[cls.create_expected_df_for_factor_compute(
[(0, 200, pd.Timestamp('2015-01-12')),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp('2015-01-17'))],
pd.Timestamp('2015-01-20')
)] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-21', '2015-02-10')]
)
return {
1: oneq_next,
2: twoq_next
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateWindows(NextEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(bz.data(events), columns)
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
    Pipeline with an estimates loader over differently-sized windows and with
    split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp('2015-01-14')
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-09'),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp('2015-01-20')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20')],
'estimate': [130., 131., 230., 231.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30
})
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [140., 240.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40
})
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-02-10')],
'estimate': [150., 250.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50
})
return pd.concat([
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
])
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame({
SID_FIELD_NAME: 0,
'ratio': (-1., 2., 3., 4., 5., 6., 7., 100),
'effective_date': (pd.Timestamp('2014-01-01'), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp('2015-01-07'),
# Split before Q1 event
pd.Timestamp('2015-01-09'),
# Split before Q1 event
pd.Timestamp('2015-01-13'),
# Split before Q1 event
pd.Timestamp('2015-01-15'),
# Split before Q1 event
pd.Timestamp('2015-01-18'),
# Split after Q1 event and before Q2 event
pd.Timestamp('2015-01-30'),
# Filter out - this is after our date index
pd.Timestamp('2016-01-01'))
})
sid_10_splits = pd.DataFrame({
SID_FIELD_NAME: 10,
'ratio': (.2, .3),
'effective_date': (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp('2015-01-07'),
# Apply a single split before Q1 event.
pd.Timestamp('2015-01-20')),
})
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame({
SID_FIELD_NAME: 20,
'ratio': (.4, .5, .6, .7, .8, .9,),
'effective_date': (
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18'),
pd.Timestamp('2015-01-30')),
})
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame({
SID_FIELD_NAME: 30,
'ratio': (8, 9, 10, 11, 12),
'effective_date': (
# Split before the event and before the
# split-asof-date.
pd.Timestamp('2015-01-07'),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp('2015-01-09'),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-15'),
pd.Timestamp('2015-01-18')),
})
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame({
SID_FIELD_NAME: 40,
'ratio': (13, 14),
'effective_date': (
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-22')
)
})
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame({
SID_FIELD_NAME: 50,
'ratio': (15, 16),
'effective_date': (
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')
)
})
return pd.concat([
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
])
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows,
ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-12')
]),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150. * 1 / 16, pd.Timestamp('2015-01-09')),
], pd.Timestamp('2015-01-13')),
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))
], pd.Timestamp('2015-01-14')),
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11, pd.Timestamp('2015-01-09')),
(40, 140., pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09')),
], end_date)
for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-01-21')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 111*.3, pd.Timestamp('2015-01-22')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-01-30', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-01-20')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-01-20')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 140.*13*14, pd.Timestamp('2015-01-09')),
(50, 150., pd.Timestamp('2015-01-09'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311*.3, pd.Timestamp('2015-02-05')),
(20, 221*.8*.9, pd.Timestamp('2015-02-10')),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240.*13*14, pd.Timestamp('2015-02-10')),
(50, 250., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-01-19')] +
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131*11*12, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-20', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101*7, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121*.7*.8*.9, pd.Timestamp('2015-02-10')),
(30, 131*11*12, pd.Timestamp('2015-01-20')),
(40, 140. * 13 * 14, pd.Timestamp('2015-02-10')),
(50, 150., pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousWithSplitAdjustedWindows(PreviousWithSplitAdjustedWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousSplitAdjustedEstimatesLoader(
bz.data(events),
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=['estimate'],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 130*1/10, cls.window_test_start_date),
(30, 131*1/10, pd.Timestamp('2015-01-09')),
(40, 140, pd.Timestamp('2015-01-09')),
(50, 150.*1/15*1/16, pd.Timestamp('2015-01-09'))],
pd.Timestamp('2015-01-09')
),
cls.create_expected_df_for_factor_compute(
[(0, 100*1/4, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*5/3, cls.window_test_start_date),
(20, 121*5/3, pd.Timestamp('2015-01-07')),
(30, 230*1/10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/15*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-12')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250.*1/16, pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-13')
),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07')),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp('2015-01-10')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-14')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100*5, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120*.7, cls.window_test_start_date),
(20, 121*.7, pd.Timestamp('2015-01-07')),
(30, 230*11, cls.window_test_start_date),
(40, 240, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-15', '2015-01-16')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100*5*6, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110*.3, pd.Timestamp('2015-01-09')),
(10, 111*.3, pd.Timestamp('2015-01-12')),
(20, 120*.7*.8, cls.window_test_start_date),
(20, 121*.7*.8, pd.Timestamp('2015-01-07')),
(30, 230*11*12, cls.window_test_start_date),
(30, 231, pd.Timestamp('2015-01-20')),
(40, 240*13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-21')
),
cls.create_expected_df_for_factor_compute(
[(0, 200 * 5 * 6, pd.Timestamp('2015-01-12')),
(10, 110 * .3, pd.Timestamp('2015-01-09')),
(10, 111 * .3, pd.Timestamp('2015-01-12')),
(20, 220 * .7 * .8, cls.window_test_start_date),
(20, 221 * .8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
pd.Timestamp('2015-01-22')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6, pd.Timestamp('2015-01-12')),
(10, 310*.3, pd.Timestamp('2015-01-09')),
(10, 311*.3, pd.Timestamp('2015-01-15')),
(20, 220*.7*.8, cls.window_test_start_date),
(20, 221*.8, pd.Timestamp('2015-01-17')),
(40, 240 * 13 * 14, pd.Timestamp('2015-01-15')),
(50, 250., pd.Timestamp('2015-01-12'))],
end_date
) for end_date in pd.date_range('2015-01-23', '2015-01-29')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 200*5*6*7, pd.Timestamp('2015-01-12')),
(10, 310*.3, | pd.Timestamp('2015-01-09') | pandas.Timestamp |
"""
Tools for hydrological regionalization.
"""
import logging
from pathlib import Path
import numpy as np
import pandas as pd
import statsmodels.api as sm
import xarray as xr
import ravenpy.models as models
from . import coords
LOGGER = logging.getLogger("PYWPS")
regionalisation_data_dir = Path(__file__).parent.parent / "data" / "regionalisation"
def regionalize(
method,
model,
nash,
params=None,
props=None,
target_props=None,
size=5,
min_NSE=0.6,
**kwds,
):
"""Perform regionalization for catchment whose outlet is defined by coordinates.
Parameters
----------
method : {'MLR', 'SP', 'PS', 'SP_IDW', 'PS_IDW', 'SP_IDW_RA', 'PS_IDW_RA'}
Name of the regionalization method to use.
model : {'HMETS', 'GR4JCN', 'MOHYSE'}
Model name.
nash : pd.Series
NSE values for the parameters of gauged catchments.
params : pd.DataFrame
        Model parameters of gauged catchments. Needed for all but the MLR method.
props : pd.DataFrame
Properties of gauged catchments to be analyzed for the regionalization. Needed for MLR and RA methods.
target_props : pd.Series or dict
Properties of ungauged catchment. Needed for MLR and RA methods.
size : int
Number of catchments to use in the regionalization.
min_NSE : float
Minimum calibration NSE value required to be considered as a donor.
kwds : {}
Model configuration parameters, including the forcing files (ts).
Returns
-------
(qsim, ensemble)
qsim : DataArray (time, )
Multi-donor averaged predicted streamflow.
ensemble : Dataset
q_sim : DataArray (realization, time)
Ensemble of members based on number of donors.
parameter : DataArray (realization, param)
Parameters used to run the model.
"""
# TODO: Include list of available properties in docstring.
# TODO: Add error checking for source, target stuff wrt method chosen.
# Select properties based on those available in the ungauged properties DataFrame.
if isinstance(target_props, dict):
ungauged_properties = pd.Series(target_props)
elif isinstance(target_props, pd.Series):
ungauged_properties = target_props
elif isinstance(target_props, pd.DataFrame):
ungauged_properties = target_props.to_series()
else:
raise ValueError
cr = coords.realization(1 if method == "MLR" else size)
cp = coords.param(model)
# Filter on NSE
valid = nash > min_NSE
filtered_params = params.where(valid).dropna()
filtered_prop = props.where(valid).dropna()
# Check to see if we have enough data, otherwise raise error
if len(filtered_prop) < size and method != "MLR":
raise ValueError(
"Hydrological_model and minimum NSE threshold \
combination is too strict for the number of donor \
basins. Please reduce the number of donor basins OR \
reduce the minimum NSE threshold."
)
# Rank the matrix according to the similarity or distance.
if method in ["PS", "PS_IDW", "PS_IDW_RA"]: # Physical similarity
dist = similarity(filtered_prop, ungauged_properties)
else: # Geographical distance.
dist = distance(filtered_prop, ungauged_properties)
# Series of distances for the first `size` best donors
sdist = dist.sort_values().iloc[:size]
# Pick the donors' model parameters and catchment properties
sparams = filtered_params.loc[sdist.index]
sprop = filtered_prop.loc[sdist.index]
# Get the list of parameters to run
reg_params = regionalization_params(
method, sparams, sprop, ungauged_properties, filtered_params, filtered_prop
)
# Run the model over all parameters and create ensemble DataArray
m = models.get_model(model)()
qsims = list()
for i, params in enumerate(reg_params):
kwds["params"] = params
kwds["run_name"] = f"reg_{i}"
m(**kwds)
qsims.append(m.q_sim.copy(deep=True))
qsims = xr.concat(qsims, dim=cr)
# 3. Aggregate runs into a single result -> dataset
if method in [
"MLR",
"SP",
"PS",
]: # Average (one realization for MLR, so no effect).
qsim = qsims.mean(dim="realization", keep_attrs=True)
elif (
"IDW" in method
): # Here we are replacing the mean by the IDW average, keeping attributes and dimensions.
qsim = IDW(qsims, sdist)
else:
raise ValueError("No matching algorithm for {}".format(method))
# Metadata handling
# TODO: Store the basin_name
# Create a DataArray for the parameters used in the regionalization
param_da = xr.DataArray(
reg_params,
dims=("realization", "param"),
coords={"param": cp, "realization": cr},
attrs={"long_name": "Model parameters used in the regionalization."},
)
ens = xr.Dataset(
data_vars={"q_sim": qsims, "parameter": param_da},
attrs={
"title": "Regionalization ensemble",
"institution": "",
"source": "RAVEN V.{} - {}".format(m.version, model),
"history": "Created by raven regionalize.",
"references": "",
"comment": "Regionalization method: {}".format(method),
},
)
# TODO: Add global attributes (model name, date, version, etc)
return qsim, ens
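# Illustrative usage sketch (not part of the original module); the gauged
# catchment tables (`nash_series`, `param_df`, `prop_df`) and the forcing file
# name are hypothetical placeholders.
#
#   qsim, ens = regionalize(
#       "SP_IDW", "GR4JCN", nash_series,
#       params=param_df, props=prop_df,
#       target_props={"latitude": 45.5, "longitude": -73.5, "area": 250.0},
#       size=5, min_NSE=0.6,
#       ts="ungauged_forcing.nc",
#   )
#   ens.q_sim          # (realization, time) donor-based ensemble
#   ens.parameter      # parameters used for each realization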
def read_gauged_properties(properties):
"""Return table of gauged catchments properties over North America.
Returns
-------
pd.DataFrame
Catchment properties keyed by catchment ID.
"""
f = regionalisation_data_dir / "gauged_catchment_properties.csv"
proptable = | pd.read_csv(f, index_col="ID") | pandas.read_csv |
#!/usr/bin/env python
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
from collections import Counter
import math
import xgboost as xgb
import pickle
import sys, os
from acheron.helpers import model_evaluators
def is_valid_type(val):
# covers core floats, numpy floating and complex floating
if isinstance(val, (float, np.inexact)):
if math.isnan(val) or np.isnan(val):
return False
else:
return True
    # covers core ints, numpy ints (signed and unsigned), longs, shorts, bytes
    # TODO: check for bytes later, as they do NOT handle math expressions well when exponential
elif isinstance(val, (int, np.integer)):
return True
# covers core and np strings, excludes unicode
elif isinstance(val, (str, np.str_)):
if val.upper() in ['INVALID', 'NAN']:
return False
else:
return True
else:
return False
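# Illustrative examples (not part of the original module) of the contract
# implemented above:
#   is_valid_type(3.14)        -> True
#   is_valid_type(np.nan)      -> False
#   is_valid_type(7)           -> True
#   is_valid_type("invalid")   -> False   ('INVALID'/'NAN' are rejected)
#   is_valid_type(">=32")      -> True
#   is_valid_type([1, 2])      -> False   (unsupported container type)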
def make_mask(label_matrix, k):
"""
Takes in a label matrix and saves a mask of which labels are valid
for each index, this allows the model to keep a sample when only a single
columns is invalid, instead of discarding the sample.
If doing kfold cross validation (k != 1), there must be at least k samples
in each class for that class to be considered valid
"""
mask = np.zeros((label_matrix.values.shape), dtype='bool')
invalid_classes = {}
# if any class has fewer than k samples, it is marked as invalid
for attribute in label_matrix.columns:
class_counter = Counter(label_matrix[attribute])
invalid_classes[attribute] = []
for key in class_counter:
if class_counter[key] < k:
invalid_classes[attribute].append(key)
for i, row in enumerate(label_matrix.values):
for j, col in enumerate(row):
if not is_valid_type(col):
continue
if col in invalid_classes[label_matrix.columns[j]]:
continue
mask[i,j] = True
return pd.DataFrame(data=mask, columns = label_matrix.columns,
index = label_matrix.index)
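# Minimal sketch (not part of the original module) of make_mask's behaviour on
# a hypothetical one-column label matrix with k=2: the NaN entry is invalid,
# and class 'B' appears fewer than k times, so both are masked out.
#
#   labels = pd.DataFrame({'AMP': ['A', 'B', np.nan, 'A']},
#                         index=['s1', 's2', 's3', 's4'])
#   make_mask(labels, k=2)
#   #        AMP
#   #   s1   True
#   #   s2  False
#   #   s3  False
#   #   s4   True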
def make_split(label_matrix, mask, k, samples):
"""
Takes a label matrix, splits it according to k fold cross validation
but only for valid samples. Produces a random split each time (stratified)
"""
assert(2<=k<=255) # if exceeding 255, change dtype below to uint16
split_df = pd.DataFrame(data=np.zeros(label_matrix.shape),
columns=label_matrix.columns, index = label_matrix.index, dtype='uint8')
split_df = split_df[~split_df.index.duplicated(keep='first')]
for col in label_matrix.columns:
# which labels are valid in this specific column
valid_labels = label_matrix[col].values[mask[col].values]
# matching sample name for each i in valid_labels
valid_samples = label_matrix.index.values[mask[col].values]
if len(valid_samples) == 0:
print("All samples in column "+col+" are invalid, skipping split")
continue
# in the event of duplicates, keep only the first seen instance
processed = []
        # we also need to factor in that we only have the samples in /samples,
        # whereas a datasheet might have thousands of valid but extra data points
#seen_bool_mask = np.array([i in samples and i not in duplicates for i in valid_samples])
seen_bool_mask = []
for i in valid_samples:
if i in processed:
seen_bool_mask.append(False)
else:
processed.append(i)
if i in samples:
seen_bool_mask.append(True)
else:
seen_bool_mask.append(False)
seen_bool_mask = np.array(seen_bool_mask)
final_labels = valid_labels[seen_bool_mask]
final_samples = valid_samples[seen_bool_mask]
# at this point, we only have labels and samples that are eligible
# for machine learning
skf = StratifiedKFold(n_splits=k, shuffle=True)
num_samples = len(final_samples)
splits = enumerate(skf.split(np.zeros((num_samples,k)),final_labels))
for i, split in splits:
# pull the array of values assigned to the testing set,
# label these genomes as per the fold they belong to
for sample in final_samples[split[1]]:
split_df.at[sample,col] = i
return split_df
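# Illustrative sketch (not part of the original module): each cell of the
# returned frame holds the fold index (0..k-1) in which that sample is used as
# test data for that column. Inputs below are hypothetical.
#
#   mask  = make_mask(labels, k=5)
#   split = make_split(labels, mask, k=5, samples=list(labels.index))
#   split.loc['s1', 'AMP']   # e.g. 3 -> sample s1 is test data in fold 3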
def load_data(dataset_name, label_name, trial, type, num_feats, k, attribute):
"""
load requested dataset, mask, and split
"""
features = pd.read_pickle("data/{}/features/{}_matrix.df".format(dataset_name, type))
labels = pd.read_pickle("data/{}/labels/{}.df".format(dataset_name,label_name))[attribute]
mask = pd.read_pickle("data/{}/features/masks/{}_{}.df".format(dataset_name,type,label_name))[attribute]
if k!=1:
split = pd.read_pickle("data/{}/splits/split{}_{}_{}_{}xCV.df".format(dataset_name,trial,type,label_name,k))
else:
split = []
features, labels = apply_mask(features, labels, mask)
return features, labels, mask, split
def train_model(features, label, model_type, num_classes):
"""
Converts feature and label matrices in a trained model
Sci-kit models are at the end, as they share a fit method
"""
# XGBoost
if model_type.upper() in ['XGB', 'XGBOOST']:
if num_classes == 2:
objective = 'binary:logistic'
else:
objective = 'multi:softmax'
        # this is probably going to suck for memory, so let's revisit XGBClassifier
        # if we blow past our RAM usage on this step
xgb_matrix = xgb.DMatrix(features.values, label, feature_names=features.columns)
params = {'objective':objective, 'num_class': num_classes}
#params = {'objective':objective}
booster = xgb.train(params, xgb_matrix)
return booster
# Artificial Neural Network
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from keras.utils import np_utils, to_categorical
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils, to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
cat_labels = to_categorical(label, num_classes)
patience = 16
early_stop = EarlyStopping(monitor='loss', patience=patience, verbose=1, min_delta=0.005, mode='auto')
reduce_LR = ReduceLROnPlateau(monitor='loss', factor= 0.1, patience=(patience/2), verbose = 1, min_delta=0.005,mode = 'auto', cooldown=0, min_lr=0)
num_feats = len(features.columns)
model = Sequential()
model.add(Dense(int(((num_feats+num_classes)/2)),activation='relu',input_dim=(num_feats)))
model.add(Dropout(0.5))
model.add(Dense(num_classes, kernel_initializer='uniform', activation='softmax'))
if num_classes == 2:
loss = "binary_crossentropy"
else:
loss = "poisson"
model.compile(loss=loss, metrics=['accuracy'], optimizer='adam')
model.fit(features.values, cat_labels, epochs=100, verbose=1, callbacks=[early_stop, reduce_LR])
return model
# Support Vector Classifier
# https://scikit-learn.org/stable/modules/svm.html#classification
if model_type.upper() in ['SVC', 'SVM']:
from sklearn import svm
model = svm.SVC()
# Support Vector Regressor
# https://scikit-learn.org/stable/modules/svm.html#regression
elif model_type.upper() in ['SVR']:
from sklearn import svm
model = svm.SVR()
# Stochastic Gradient Descent Classifier
# https://scikit-learn.org/stable/modules/sgd.html#classification
elif model_type.upper() in ['SGDC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="hinge", penalty="l2", max_iter=25)
# Perceptron
# https://scikit-learn.org/stable/modules/linear_model.html#perceptron
elif model_type.upper() in ['PERC']:
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", penalty=None)
# Passive Aggressive Algorithms
# https://scikit-learn.org/stable/modules/linear_model.html#passive-aggressive-algorithms
# https://www.geeksforgeeks.org/passive-aggressive-classifiers
elif model_type.upper() in ['PAC']:
from sklearn.linear_model import PassiveAggressiveClassifier
model = PassiveAggressiveClassifier(max_iter=100)
# Nearest Neighbours Classifier
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification
elif model_type.upper() in ['NNC']:
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
# Nearest Neighbours Regressor
# https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-regression
elif model_type.upper() in ['NNR']:
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor(n_neighbors=3)
# Gaussian Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#gaussian-naive-bayes
elif model_type.upper() in ['GNB']:
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
# Multinomial Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#multinomial-naive-bayes
elif model_type.upper() in ['MNB']:
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB()
# Categorical Naive Bayes
# https://scikit-learn.org/stable/modules/naive_bayes.html#categorical-naive-bayes
elif model_type.upper() in ['CNB']:
from sklearn.naive_bayes import CategoricalNB
model = CategoricalNB()
# Decision Tree Classifier
# https://scikit-learn.org/stable/modules/tree.html#classification
elif model_type.upper() in ['DTC']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
# Decision Tree Regressor
# https://scikit-learn.org/stable/modules/tree.html#regression
elif model_type.upper() in ['DTR']:
from sklearn import tree
model = tree.DecisionTreeRegressor()
# AdaBoost
# https://scikit-learn.org/stable/modules/ensemble.html#adaboost
elif model_type.upper() in ['ADA']:
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier()
# Gradient Boosted Decision Trees
# https://scikit-learn.org/stable/modules/ensemble.html#gradient-tree-boosting
elif model_type.upper() in ['GBDT']:
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier()
# Multi-layer Perceptron Classifier
# https://scikit-learn.org/stable/modules/neural_networks_supervised.html#multi-layer-perceptron
elif model_type.upper() in ['MLPC']:
from sklearn.neural_network import MLPClassifier
model = MLPClassifier()
else:
raise Exception("model type {} not defined".format(model_type))
model.fit(features.values, label)
return model
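# Illustrative usage (not part of the original module); `features` is a
# hypothetical DataFrame and `encoded_labels` an integer-encoded label array
# in the range 0..num_classes-1.
#
#   booster = train_model(features, encoded_labels, 'XGB', num_classes=4)
#   svm_clf = train_model(features, encoded_labels, 'SVC', num_classes=4)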
def train_hyper_model(x_train, y_train, x_val, y_val, model_type, num_classes):
"""
Trains a hyperparameter optimized model
"""
from acheron.workflows import hyp
best_model, best_params = hyp.get_best(x_train, y_train, x_val, y_val, model_type, num_classes)
return best_model, best_params
"""
    Unsure if saving is required; try to do without it.
import time
from random import seed
from random import random
seed(time.time())
test_id = random()
save_path = "data/hyp_data/{}/".format(test_id)
os.makedirs(save_path, exist_ok=False)
for i, data_chunk in enumerate([x_train, y_train, x_val, y_val]):
if i%2 == 0:
data_chunk.to_pickle("{}{}.pkl".format(save_path,i))
else:
np.save("{}{}.npy".format(save_path,i),data_chunk)
trials = Trials()
# https://towardsdatascience.com/an-example-of-hyperparameter-optimization-on-xgboost-lightgbm-and-catboost-using-hyperopt-12bc41a271e
# Search Space Subject to Change!!
if model_type.upper() in ['XGB','XGBOOST']:
search_params = {
'learning_rate': hp.choice('learning_rate', np.arange(0.05, 0.31, 0.05)),
'max_depth': hp.choice('max_depth', np.arange(1, 8, 1, dtype=int)),
'min_child_weight': hp.choice('min_child_weight', np.arange(1, 8, 1, dtype=int)),
'colsample_bytree': hp.choice('colsample_bytree', np.arange(0.3, 0.8, 0.1)),
'subsample': hp.uniform('subsample', 0.8, 1),
'num_class': num_classes,
'test_id': test_id
}
best_index = fmin(
fn=hyp.xgboost_objective, space=search_params,
algo=tpe.suggest, max_evals=100, trials=trials)
best_params = space_eval(search_params, best_index)
if num_classes == 2:
best_params['objective'] = 'binary:logistic'
else:
best_params['objective'] = 'multi:softmax'
best_params['n_estimators'] = 10
best_params['num_class'] = num_classes
xgb_matrix = xgb.DMatrix(x_train.values, y_train, feature_names=x_train.columns)
booster = xgb.train(best_params, xgb_matrix)
return booster, best_params
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
from acheron.workflows import hyp
best_run, best_model = optim.minimize(
model=hyp.create_model,
data=load_hyp_data,
algo=tpe.suggest,
max_evals=10,
trials=Trials(),
keep_temp=True)
return best_model, best_run
"""
#else:
#raise Exception("model type {} not defined".format(model_type))
"""
# Minimal Cost-Complexity Pruning
# https://scikit-learn.org/stable/modules/tree.html#minimal-cost-complexity-pruning
elif model_type.upper() in ['MCCP']:
from sklearn import tree
model = tree.DecisionTreeClassifier()
path = model.cost_complexity_pruning_path(features.values, labels)
ccp_alphas, impurities = path.ccp_alphas, path.impurities
models = []
for ccp_alpha in ccp_alphas:
clf = DecisionTreeClassifier(ccp_alpha=ccp_alpha)
clf.fit(features.values, labels)
clfs.append(clf)
# now use validation set to see which model did best, use that alpha to train final model
"""
def predict(model, features, model_type):
"""
    Takes a model and a feature set, returns a label-like array of predictions
"""
if model_type.upper() in ['XGB', 'XGBOOST']:
xgb_matrix = xgb.DMatrix(features.values, feature_names = features.columns)
return [round(i) for i in model.predict(xgb_matrix, validate_features=True)]
elif model_type.upper() in ['ANN','KERAS','TF','TENSORFLOW']:
# This will be in categorical form, need to decode it
prediction = model.predict_classes(features)
#return np.argmax(prediction, axis=1)
return prediction
else:
try:
return [round(i) for i in model.predict(features)]
except:
raise Exception("model type {} not defined".format(model_type))
def evaluate_model(predicted, actual, model_type, dilutions, attribute, encoder):
"""
    Evaluates how well a model did (accuracy).
    For MIC modules, also reports off-by-one (dilution) accuracy and error rates.
    Takes encoded class labels (0,1,2), not decoded values (2,4,8,16).
"""
# this df will eventually get all info about the test
direct_accuracy = np.sum([predicted[i]==actual[i] for i in range(len(predicted))])/len(predicted)
dilutional_accuracies = {}
find_errors = False
if len(dilutions) > 0:
find_errors = True
for dilution in dilutions:
total = 0
correct = 0
for i in range(len(predicted)):
total +=1
if abs(predicted[i]-actual[i]) <= dilution:
correct +=1
dilutional_accuracies[dilution] = correct/total
data = [len(predicted),direct_accuracy]
columns = ["Supports", "Accuracy"]
for dilution in dilutions:
if str(dilution) == '0':
continue
else:
data.append(dilutional_accuracies[dilution])
columns.append("Within {} Dilution".format(dilution))
if find_errors:
decoder = {v:k for k,v in encoder.items()}
pred_decoded = [decoder[i] for i in predicted]
act_decoded = [decoder[i] for i in actual]
errors = [model_evaluators.find_error_type(i[0],i[1], attribute) for i in zip(pred_decoded, act_decoded)]
error_counts = Counter(errors)
error_types = ["Very Major Error", "Major Error", "Non Major Error", "Correct"]
total_errors = 0
for error_type in error_types:
total_errors += error_counts[error_type]
percent = error_counts[error_type]/len(predicted)
data.append(percent)
columns.append(error_type)
try:
assert len(predicted) == total_errors
except:
print('Number of Errors+Correct does not equal number of predictions')
raise
results_df = pd.DataFrame(data=[data], columns=columns)
return results_df
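# Worked example (illustrative): with predicted = [1, 2, 3] and actual =
# [1, 3, 3], direct accuracy is 2/3, while the "Within 1 Dilution" accuracy is
# 3/3 because the single miss is only one encoded step away.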
def mean_summaries(summaries):
"""
    Takes a list of model summaries and
    averages them appropriately, taking the number of supports into account
"""
try:
indx = summaries[0].index[0]
except:
indx = 0
mean_df = | pd.DataFrame(columns=summaries[0].columns, index=[indx]) | pandas.DataFrame |
from flask import Flask, render_template, request, jsonify, url_for
import atexit
import os
import json
import folium
from botocore.client import Config
import ibm_boto3
import pandas as pd
import ast
from collections import namedtuple
import numpy as np
class ProvinceMap:
def __init__(self, province_mapping):
self._mp = pd.read_csv('province_id_mapping.csv')
def get_id(self, province):
return self._mp.loc[self._mp.province_nl.str.contains(province) | self._mp.province_en.str.contains(province), "id"].values[0]
def get_prov(self, p_id):
return self._mp.loc[self._mp.id==p_id, "province_en"].values[0]
@property
def index_list(self):
return list(self._mp.id)
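# Illustrative usage (not part of the original module); the province name is a
# hypothetical entry that must exist in province_id_mapping.csv.
#
#   pmap = ProvinceMap("province_id_mapping.csv")
#   pid = pmap.get_id("Utrecht")     # numeric id matched on NL/EN name
#   pmap.get_prov(pid)               # English province name for that id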
province_map = ProvinceMap("province_id_mapping.csv")
def get_provinces_routes_dur(file_name):
# reading province_coordinates.csv to coordinates_dict
body = read_input_file(file_name)
coordinates_df = pd.read_csv(body)
coordinates_dict = coordinates_df.set_index('Province Name').T.to_dict('list')
url = 'http://router.project-osrm.org/table/v1/driving/'
i=1
for k,v in coordinates_dict.items():
if i<len(coordinates_dict):
url = url+str(v[1])+","+str(v[0])+";"
else:
url = url+str(v[1])+","+str(v[0])
i+=1
print(url)
response = requests.get(url)
jsonRes = response.json()
rounded_coord_dict = {(round(v[0],2), round(v[1], 2)):k for k,v in coordinates_dict.items()}
rounded_destinations = [(round(float(x['location'][1]),2), round(float(x['location'][0]), 2)) for x in jsonRes['destinations']]
routes_dur = {(province_map.get_id(rounded_coord_dict[rounded_destinations[x]]), province_map.get_id(rounded_coord_dict[rounded_destinations[y]])): jsonRes['durations'][x][y]
for x, y in list(it.product(range(len(rounded_destinations)), range(len(rounded_destinations))))}
return routes_dur
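# Illustrative shape of the OSRM request built above (coordinates are
# hypothetical): longitude,latitude pairs joined by ';', e.g.
#   http://router.project-osrm.org/table/v1/driving/5.12,52.09;4.89,52.37
# The response's 'durations' matrix is then re-keyed by (origin_id, dest_id).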
def get_coordinates(prov_id):
coordinates_df = | pd.read_csv('province_coordinates.csv') | pandas.read_csv |
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search, Q
from elasticsearch.helpers import scan
from itertools import islice
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
import plotly.express as px
from styles import style_sdl, style_sdl_light
from collections import Counter
import dash
import dash_bootstrap_components as dbc
from math import log
from dateutil.parser import parse
import folium
from shapely.geometry import box
from json import load
def fetch_ids(kwd):
if kwd is None or kwd == '':
return None
kwd = kwd.lower()
query = {"track_scores":"true", "_source": ["title","metadata.id"], "size": 10,
"query": {"bool": {"must": [
{ "exists": {"field": "profile.report"}},
{"multi_match" : {"query": kwd,
"fields": ["metadata.title", "metadata.keywords", "metadata.description"]}}]
}}}
with open('../settings.json') as f:
j = load(f)
client = Elasticsearch(j["ElasticSearch"]['es_url'])
response = client.search(index=j["ElasticSearch"]['es_index'], body=query)
d = ((hit['_source']['metadata']['id'], hit['_source']['title']) for hit in response['hits']['hits'] )
d = list(islice(d, 10))
return d
def fetch_ids_by_title(title):
if title is None:
return []
query = { "track_scores": "true", "_source": ["_id", "metadata.title"],
"query": { "bool": { "must": [{ "match": {"metadata.title": title}}],
"filter": [ {"exists": {"field": "profile.report"}}]
}}}
with open('../settings.json') as f:
j = load(f)
client = Elasticsearch(j["ElasticSearch"]['es_url'])
response = client.search(index=j["ElasticSearch"]['es_index'], body=query)
print(query)
print()
out = []
for i, hit in enumerate(response['hits']['hits']):
if i< 2:
print(hit)
out += [{'_id': hit['_id'], 'title': hit['_source']['metadata']['title']}]
print()
print(out[:2])
return out
def fetch_id(rid=None):
if rid is None:
return None
else:
query = {"track_scores":"true",
"query": { "match": {"_id": rid} }, "size": 1}
with open('../settings.json') as f:
j = load(f)
print(rid)
client = Elasticsearch(j["ElasticSearch"]['es_url'])
response = client.search(index=j["ElasticSearch"]['es_index'], body=query)
for hit in response['hits']['hits']:
break
return hit
def switch_item(d, key, val=-1):
d2 = d
if key == 'min':
key = '0%'
if key == 'max':
key = '100%'
d2['key'] = key
if val != -1:
d2['value'] = val
return d2
def search_field(d, key):
for val in d:
if val['key'] == key:
return val['value']
def transform_general(d):
d2 = {}
d2['Number of variables'] = d['n_var']
d2['Number of observations'] = d['n']
d2['Missing cells'] = d['n_cells_missing']
d2['Missing cells (%)'] = '{:.2f}%'.format(100*d['p_cells_missing'])
d2['Duplicate rows'] = d['n_duplicates']
d2['Duplicate rows (%)'] = '{:.2f}%'.format(100*d['p_duplicates'])
d2['Total size in memory'] = '{}.0B'.format(d['memory_size'])
d2['Average record size in memory'] = '{:.2f}.0B'.format(d['record_size'])
return d2
def transform_variable(d, dtype):
d1, d2 = [], []
for item in d:
if item['key'] == 'n_distinct':
d1.append(switch_item(item, 'Distinct'))
elif item['key'] == 'p_distinct':
d1.append(switch_item(item, 'Distinct (%)', '{:.2f}%'.format(100*float(item['value']))))
elif item['key'] == 'n_missing':
d1.append(switch_item(item, 'Missing'))
elif item['key'] == 'p_missing':
d1.append(switch_item(item, 'Missing (%)', '{:.2f}%'.format(100*float(item['value']))))
elif item['key'] == 'memory_size':
d1.append(switch_item(item, 'Memory', '{}.0B'.format(item['value'])))
if dtype in ['Numeric', 'Temporal']:
if item['key'] in ['min', '5%', '25%', '50%', '75%', '95%', 'max']:
d2.append(switch_item(item, item['key']))
elif dtype == 'Spatial':
if item['key'] in ['x_min', 'y_min', 'x_max', 'y_max']:
d2.append(switch_item(item, item['key']))
return d1, d2
def retrieve_plot(var):
fig = ""
if 'freqs' in var:
df = | pd.DataFrame(var['freqs']) | pandas.DataFrame |
#!/usr/bin/env python
import datetime as dt
import glob
import logging
import matplotlib.pyplot as plt
import pandas as pd
import pdb
import numpy as np
import hwtmode.statisticplot
import scipy.ndimage.filters
from sklearn.calibration import calibration_curve
from sklearn import metrics
from tensorflow import is_tensor
from tensorflow.keras import backend as K
import time, os
import xarray
def log(msg, flush=True):
print( time.ctime(time.time()), msg , flush=flush)
def brier_skill_score(obs, preds):
if is_tensor(obs) and is_tensor(preds):
bs = K.mean((preds - obs) ** 2)
obs_climo = K.mean(obs, axis=0) # use each observed class frequency instead of 1/nclasses. Only matters if obs is multiclass.
bs_climo = K.mean((obs - obs_climo) ** 2)
#bss = 1.0 - (bs/bs_climo+K.epsilon()) # TODO: shouldn't K.epsilon() be grouped with denominator?
bss = 1.0 - bs/(bs_climo+K.epsilon())
else:
bs = np.mean((preds - obs) ** 2)
obs_climo = np.mean(obs, axis=0) # use each observed class frequency instead of 1/nclasses. Only matters if obs is multiclass.
bs_climo = np.mean((obs - obs_climo) ** 2)
bss = 1 - bs/bs_climo
return bss
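# Worked example (illustrative): for obs = [1, 0, 1] and preds = [0.9, 0.2, 0.6]
#   BS        = mean((preds - obs)**2)       ~= 0.0700
#   BS_climo  = mean((obs - mean(obs))**2)   ~= 0.2222   (climatology = 2/3)
#   BSS       = 1 - BS / BS_climo            ~= 0.685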
def rptdist2bool(df, rptdist, twin):
# get rid of columns that are not associated with the time window (twin)
dropcol=[]
for r in ["sighail", "sigwind", "hailone", "wind", "torn"]:
for h in [0,1,2]:
if h != twin:
dropcol.append(f"{r}_rptdist_{h}hr")
df = df.drop(columns=dropcol)
# keep track of new Boolean column names
rptcols = []
for r in ["sighail", "sigwind", "hailone", "wind", "torn"]:
rh = f"{r}_rptdist_{twin}hr"
# Convert severe report distance to boolean (0-rptdist = True)
df[rh] = (df[rh] >= 0) & (df[rh] < rptdist)
rptcols.append(rh)
# Any report
any_rpt_col = f"any_rptdist_{twin}hr"
hailwindtorn = [f"{r}_rptdist_{twin}hr" for r in ["hailone","wind","torn"]]
df[any_rpt_col] = df[hailwindtorn].any(axis="columns")
rptcols.append(any_rpt_col)
return df, rptcols
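# Illustrative sketch (not part of the original module): with rptdist=40 and
# twin=2, a "torn_rptdist_2hr" value of 25 (report within 25 km) becomes True,
# while -1 (no report) or 120 (too far) becomes False; "any_rptdist_2hr" is the
# row-wise OR of the hail/wind/torn Boolean columns.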
def get_glm(twin,rptdist,date=None):
    assert twin == 2, f"get_glm assumes time window is 2, not {twin}"
logging.info(f"load {twin}h {rptdist}km GLM")
if date:
logging.info(f"date={date}")
glmfiles = sorted(glob.glob(f"/glade/work/ahijevyc/GLM/{date.strftime('%Y%m%d')}*.glm.nc"))
glm = xarray.open_mfdataset(glmfiles, concat_dim="time_coverage_start", combine="nested")
else:
oneGLMfile = True
if oneGLMfile:
glm = xarray.open_dataset("/glade/scratch/ahijevyc/temp/GLM_all.nc")
else:
glmfiles = sorted(glob.glob("/glade/work/ahijevyc/GLM/2*.glm.nc"))
#glmtimes = [datetime.datetime.strptime(os.path.basename(x), "%Y%m%d%H.glm.nc") for x in glmfiles] # why is this here?
logging.info("open_mfdataset")
glm = xarray.open_mfdataset(glmfiles, concat_dim="time_coverage_start", combine="nested")
assert (glm.time_coverage_start[1] - glm.time_coverage_start[0]) == np.timedelta64(3600,'s'), 'glm.time_coverage_start interval not 1h'
logging.info("Add flashes from previous 2 times and next time to current time. 4-hour centered time window")
glm = glm + glm.shift(time_coverage_start=2) + glm.shift(time_coverage_start=1) + glm.shift(time_coverage_start=-1)
if rptdist != 40:
k = int(rptdist/40)
logging.warning(f"this is not correct. the window overlaps masked points")
logging.warning(f"TODO: save GLM in non-masked form, or filter it from 40km to 120km while unmasked and making the GLM files.")
logging.info(f"sum GLM flash counts in {k}x{k} window")
glm = glm.rolling(x=k, y=k, center=True).sum()
glm = glm.rename(dict(time_coverage_start="valid_time", y="projection_y_coordinate", x="projection_x_coordinate"))
return glm
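# Note (illustrative): after the shift-and-add above, the value at valid_time t
# is the sum of the hourly flash counts starting at t-2h, t-1h, t, and t+1h,
# i.e. the 4-hour window centered on t described in the log message.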
def print_scores(obs, fcst, label, desc="", n_bins=10, debug=False):
# print scores for this set of forecasts
# histogram of probability values
print(np.histogram(fcst, bins=n_bins))
# reliability curves
true_prob, fcst_prob = calibration_curve(obs, fcst, n_bins=n_bins)
for o, f in zip(true_prob, fcst_prob): print(o, f)
print('brier score', np.mean((obs-fcst)**2))
# BSS
bss_val = hwtmode.statisticplot.bss(obs, fcst)
print('bss', bss_val)
# ROC auc
auc = metrics.roc_auc_score(obs, fcst)
print('auc', auc)
# copied and pasted from ~ahijevyc/bin/reliability_curve_MET.py
"""calibration curve """
fig_index=1
fig = plt.figure(fig_index, figsize=(10, 7))
ax1 = plt.subplot2grid((3,2), (0,0), rowspan=2)
reld = hwtmode.statisticplot.reliability_diagram(ax1, obs, fcst, label=label, n_bins=n_bins, debug=debug)
ax1.tick_params(axis='x', labelbottom=False)
"""histogram of counts"""
ax2 = plt.subplot2grid((3,2), (2,0), rowspan=1, sharex=ax1)
histogram_of_counts = hwtmode.statisticplot.count_histogram(ax2, fcst, label=label, n_bins=n_bins, debug=debug)
ROC_ax = plt.subplot2grid((3,2), (0,1), rowspan=2)
roc_curve = hwtmode.statisticplot.ROC_curve(ROC_ax, obs, fcst, label=label, sep=0.1, debug=debug)
fineprint = f"{desc} {label}\ncreated {str(dt.datetime.now(tz=None)).split('.')[0]}"
plt.annotate(s=fineprint, xy=(1,1), xycoords=('figure pixels', 'figure pixels'), va="bottom", fontsize=5)
ofile = f'{desc}.{label}.png'
plt.savefig(ofile)
print("made", os.path.realpath(ofile))
return true_prob, fcst_prob, bss_val, auc
def upscale(field, nngridpts, type='mean', maxsize=27):
if type == 'mean':
field = scipy.ndimage.filters.uniform_filter(field, size=maxsize, mode='nearest')
#field = scipy.ndimage.filters.uniform_filter(field, size=maxsize, mode='constant') # mode shouldn't matter. mask takes over. But it does matter for pre-Apr 21, 2015 T2 field.
# For some reason, in this case T2 is transformed to 0-4 range
elif type == 'max':
field = scipy.ndimage.filters.maximum_filter(field, size=maxsize)
elif type == 'min':
field = scipy.ndimage.filters.minimum_filter(field, size=maxsize)
field_interp = field.flatten()[nngridpts[1]].reshape((65,93))
return field_interp
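# Illustrative usage (not part of the original module); `t2_native` and
# `nngridpts` are hypothetical inputs on the native grid.
#
#   t2_coarse = upscale(t2_native, nngridpts, type='mean', maxsize=27)
#   t2_coarse.shape   # (65, 93) on the target grid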
def get_features(subset='all'):
# complex features
explicit_features = [ 'COMPOSITE_REFL_10CM', 'REFD_MAX', 'UP_HELI_MAX', 'UP_HELI_MAX03', 'UP_HELI_MAX01', 'W_UP_MAX', 'W_DN_MAX', 'WSPD10MAX', 'RAINNC_1H' ]
env_features = [ 'MUCAPE', 'SBCAPE', 'SBCINH', 'SHR01', 'SHR06', 'MLCINH', 'MLLCL', 'SRH01', 'SRH03', 'T2', 'TD2', 'PSFC','CAPESHEAR', 'STP', 'LR75' ]
env_features.remove('PSFC') # suspect before Sep 2015
static_features = [ 'fhr', 'dayofyear', 'lat', 'lon', 'hgt' ]
large_scale_features = ['U925','U850','U700','U500','V925','V850','V700','V500','T925','T850','T700','T500','TD925','TD850','TD700','TD500']
simple_max_fields = ['COMPOSITE_REFL_10CM', 'REFD_MAX', 'UP_HELI_MAX', 'UP_HELI_MAX03', 'UP_HELI_MAX01', 'W_UP_MAX', 'W_DN_MAX', 'WSPD10MAX', 'RAINNC_1H']
simple_mean_fields = ['STP', 'CAPESHEAR', 'MUCAPE', 'SBCAPE', 'MLCINH', 'SBCINH', 'MLLCL', 'SHR06', 'SHR01', 'SRH03', 'SRH01', 'T2', 'TD2', 'PSFC']
nbrs = [3,5]
if "7x7" in subset:
nbrs = [3,5,7]
simple_max_features = [ f+'-N%dT%d'%(x,t) for f in simple_max_fields for x in nbrs for t in [1,3,5] ]
simple_mean_features = [ f+'-N%dT%d'%(x,t) for f in simple_mean_fields for x in nbrs for t in [1,3,5] ]
basic_features = static_features + explicit_features + env_features
# all fields
if subset == 'all': features = static_features + explicit_features + env_features + large_scale_features + simple_max_features + simple_mean_features
# UH only
if subset == 'uhonly': features = static_features + ['UP_HELI_MAX', 'UP_HELI_MAX-N3T1', 'UP_HELI_MAX-N3T3', \
'UP_HELI_MAX-N3T5', 'UP_HELI_MAX-N5T1', 'UP_HELI_MAX-N5T3', 'UP_HELI_MAX-N5T5']
# basic features only
if subset == 'basic': features = basic_features
# basic + largescale only
if subset == 'basiclarge': features = basic_features + large_scale_features
# environmental features only
if subset == 'envonly': features = static_features + env_features + large_scale_features + simple_mean_features
    # no upper air features (this also removed the explicit features accidentally...)
if subset == 'noupperair': features = static_features + env_features + simple_mean_features + simple_max_features
if subset[0:11] == 'basic_nbrhd': features = basic_features + simple_mean_features + simple_max_features
if subset == 'storm':
features = ["SBCAPE", "UP_HELI_MAX", "W_UP_MAX", "SHR06", "CAPESHEAR", "TD2", "PREC_ACC_NC", "WSPD10MAX", "STP", "GRPL_MAX", "HGT0C", "CAPESHEAR-N3T1"]
if subset == 'env':
features = ["SBCAPE", "SHR06", "CAPESHEAR", "TD2", "STP", "HGT0C", "CAPESHEAR-N3T1"]
if 'ens_mean' in subset:
features_c = features.copy()
for fcn in ["std", "max", "min"]:
if '_'+fcn in subset:
# add " xxx" versions of each feature
features += [x + " " + fcn for x in features_c]
# remove functions of static features
for static in ["lon", "lat", "hgt", "fhr", "dayofyear"]:
features.remove(static + " " + fcn)
features = list(set(features)) # no duplicates
features.sort()
return features
def read_csv_files(sdate, edate, dataset, members=[str(x) for x in range(1,11)], columns=None):
# read in all CSV files for 1km forecasts
all_files = []
for member in members:
all_files.extend(glob.glob(f'/glade/scratch/ahijevyc/NSC_objects/grid_data_{dataset}_mem{member}_d01_????????-0000.par'))
all_files = set(all_files) # in case you ask for same member twice
log("found "+str(len(all_files))+" files")
all_files = [x for x in all_files if sdate.strftime('%Y%m%d') <= x[-17:-9] <= edate.strftime('%Y%m%d')]
all_files = sorted(all_files) # important for predictions_labels output
log(f'Reading {len(all_files)} forecasts from {sdate} to {edate}')
#df = pd.concat((pd.read_csv(f, compression='gzip', dtype=type_dict) for f in all_files))
#df = pd.concat((pd.read_csv(f, dtype=type_dict) for f in all_files))
ext = all_files[0][-4:]
if ext == ".csv": df = pd.concat((pd.read_csv(f, engine='c') for f in all_files), sort=False)
elif ext == ".par":
df = pd.concat(( | pd.read_parquet(f) | pandas.read_parquet |
#!/usr/bin/env python
# coding=utf-8
# vim: set filetype=python:
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import os
import posixpath
import sys
import math
import datetime
import string
from functools import wraps
import traceback
import xlrd3 as xlrd
import openpyxl
import unicodecsv as csv
from math import log10, floor
from pandas.api.types import is_string_dtype
import pandas as pd
import numpy as np
import six
import six.moves
import orjson as json
from plaidcloud.rpc import utc
from plaidcloud.rpc.connection.jsonrpc import SimpleRPC
from plaidcloud.rpc.rpc_connect import Connect
from plaidcloud.utilities.query import Connection, Table
from plaidcloud.utilities import data_helpers as dh
__author__ = '<NAME>'
__maintainer__ = '<NAME> <<EMAIL>>'
__copyright__ = '© Copyright 2013-2021, Tartan Solutions, Inc'
__license__ = 'Apache 2.0'
CSV_TYPE_DELIMITER = '::'
class ContainerLogger(object):
def info(self, msg):
print(msg, file=sys.stderr)
def debug(self, msg):
self.info(msg)
def exception(self, msg=None):
print(traceback.format_exc(), file=sys.stderr)
if msg is not None:
print(msg, file=sys.stderr)
logger = ContainerLogger()
def sql_from_dtype(dtype):
"""Returns a sql datatype given a pandas datatype
Args:
dtype (str): The pandas datatype to convert
Returns:
str: the equivalent SQL datatype
Examples:
>>> sql_from_dtype('bool')
'boolean'
>>> sql_from_dtype('float64')
'numeric'
>>> sql_from_dtype('number')
'numeric'
>>> sql_from_dtype('varchar(123)')
'text'
>>> sql_from_dtype('char(3)')
'text'
>>> sql_from_dtype('xml')
'text'
>>> sql_from_dtype('bytea')
'largebinary'
"""
mapping = {
'bool': 'boolean',
'boolean': 'boolean',
's8': 'text',
's16': 'text',
's32': 'text',
's64': 'text',
's128': 'text',
's256': 'text',
'object': 'text',
's512': 'text',
's1024': 'text',
'text': 'text',
'string': 'text',
'int8': 'smallint', # 2 bytes
'int16': 'integer',
'smallint': 'smallint',
'int32': 'integer', # 4 bytes
'integer': 'integer',
'int64': 'bigint', # 8 bytes
'bigint': 'bigint',
'float8': 'numeric',
'float16': 'numeric', # variable but ensures precision
'float32': 'numeric', # variable but ensures precision
'float64': 'numeric', # variable but ensures precision
'numeric': 'numeric',
'serial': 'serial',
'bigserial': 'bigserial',
'datetime64[s]': 'timestamp', # This may have to cover all datettimes
'datetime64[d]': 'timestamp',
'datetime64[ns]': 'timestamp',
'timestamp': 'timestamp',
'timestamp without time zone': 'timestamp',
'timedelta64[s]': 'interval', # This may have to cover all timedeltas
'timedelta64[d]': 'interval',
'timedelta64[ns]': 'interval',
'interval': 'interval',
'date': 'date',
'time': 'time',
'binary': 'largebinary',
'bytea': 'largebinary',
'largebinary': 'largebinary',
'xml': 'text',
'uuid': 'text',
'money': 'numeric',
'real': 'numeric',
'json': 'text',
'cidr': 'text',
'inet': 'text',
'macaddr': 'text',
}
dtype = str(dtype).lower()
if dtype.startswith('num'):
dtype = 'numeric'
elif 'char' in dtype:
dtype = 'text'
return mapping[dtype]
def save_typed_psv(df, outfile, sep='|', **kwargs):
"""Saves a typed psv, from a pandas dataframe. Types are analyze compatible
sql types, written in the header, like {column_name}::{column_type}, ...
Args:
df (`pandas.DataFrame`): The dataframe to create the psv from
outfile (file object or str): The path to save the output file to
sep (str, optional): The separator to use in the output file
"""
# ADT2017: _write_copy_from did something special with datetimes, but I'm
# not sure it's necessary, so I'm leaving it out.
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
column_names = [cleaned(n) for n in list(df)]
column_types = [sql_from_dtype(d) for d in df.dtypes]
header = [
CSV_TYPE_DELIMITER.join((name, sqltype))
for name, sqltype in six.moves.zip(column_names, column_types)
]
df.to_csv(outfile, header=header, index=False, sep=sep)
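# Illustrative usage of save_typed_psv (added sketch; the frame and output path below are
# hypothetical). The header encodes each column as {name}::{sql_type}:
#
#     example_df = pd.DataFrame({'id': [1, 2], 'name': ['a', 'b'], 'amount': [1.5, 2.0]})
#     save_typed_psv(example_df, '/tmp/example.psv')
#     # first line of /tmp/example.psv: id::bigint|name::text|amount::numeric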
def list_of_dicts_to_typed_psv(lod, outfile, types, fieldnames=None, sep='|'):
""" Saves a list of dicts as a typed psv. Needs a dict of sql types. If
provided, fieldnames will specify the column order.
Args:
lod (:type:`list` of :type:`dict`): The list of dicts containing the data
to use to create the psv
outfile (str): The path to save the output file to, including file name
types (dict): a dict with column names as the keys and column datatypes as
the values
fieldnames (:type:`list` of :type:`str`, optional): A list of the field names.
If none is provided, defaults to the keys in `types`
sep (str): The separator to use in the output file
"""
def cleaned(name):
return six.text_type(name).replace(CSV_TYPE_DELIMITER, '')
header = {
name: CSV_TYPE_DELIMITER.join((cleaned(name), sqltype))
for name, sqltype in types.items()
}
if fieldnames is None:
# Caller doesn't care about the order
fieldnames = list(types.keys())
if isinstance(outfile, six.string_types):
buf = open(outfile, 'wb')
else:
buf = outfile
try:
writer = csv.DictWriter(buf, fieldnames=fieldnames, delimiter=sep)
writer.writerow(header) # It's not just the keys, so we're not using writeheader
for row in lod:
writer.writerow(row)
finally:
if isinstance(outfile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
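# Illustrative usage of list_of_dicts_to_typed_psv (added sketch; the rows and path are
# hypothetical). `types` supplies the SQL types written into the header:
#
#     rows = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
#     list_of_dicts_to_typed_psv(rows, '/tmp/rows.psv',
#                                types={'id': 'bigint', 'name': 'text'},
#                                fieldnames=['id', 'name'])
#     # header written: id::bigint|name::text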
def get_project_variables(token, uri, project_id):
"""It opens a connection to Analyze and then
gets vars for a given project
Args:
        token (str): oAuth token used to authenticate the RPC connection
uri (str): Typically https://ci.plaidcloud.com/json-rpc/, https://plaidcloud.com/json-rpc/
or local dev machine equiv. Would typically originate from a local config.
project_id (str): Id of the Project for which to grab the variables
Returns:
dict: Variables as key/values
"""
rpc = SimpleRPC(token, uri, verify_ssl=True)
try:
project_vars = rpc.analyze.project.variables(project_id=project_id)
except:
project_vars = rpc.analyze.project.variables(project=project_id)
return {pv['id']: pv['value'] for pv in project_vars}
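# Illustrative call of get_project_variables (added sketch; the token, uri, and project id
# are placeholders, not real credentials):
#
#     variables = get_project_variables('oauth-token', 'https://plaidcloud.com/json-rpc/',
#                                       'project-uuid')
#     # -> {'variable_id': 'variable_value', ...}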
def download(tables, configuration=None, retries=5, conn=None, clean=False, **kwargs):
"""This replaces the old get_tables() that was client-specific.
It opens a connection to Analyze and then
accepts a set of tables and saves them off to a local location.
For now, tables are understood to be typed psv's, but that can expand to
suit the need of the application (for instance, Excel.)
    Args:
        tables (set or list): table paths or dicts ({'table_name', 'query', 'table_object'}) to retrieve
            (for backwards compatibility, you can leave off the initial '/')
        configuration (dict, optional): config settings such as 'return_df', 'project_id', or 'LOCAL_STORAGE'
        retries (int, optional): how many download attempts to make per table before giving up
        conn (Connection, optional): an existing Analyze connection; if omitted, one is created via RPC
        clean (bool, optional): run downloaded frames through dh.clean_frame
        **kwargs:
            config (dict) contains a dict of config settings
            token (str) SimpleRPC authorization token
            uri (str): uri e.g. 'https://ci.plaidcloud.com/json-rpc/'
            local_storage_path (str) Target for files being saved
    Returns:
        list of dict: one {'df': DataFrame, 'name': table_path} entry per requested table,
        or False if no RPC connection could be established.
Examples:
"""
# TODO: if configuration is None, revert to **kwargs for the params we need.
if not conn:
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
try:
return_df = configuration['return_df']
except:
return_df = True
try:
project_id = configuration['project_id']
except:
project_id = conn.project_id
dfs = []
for table in tables:
table_path = table.get('table_name')
query = table.get('query')
table_obj = table.get('table_object')
df = None # Initial value
# wipe this out each time through
clean_df = pd.DataFrame()
logger.debug("Attempting to download {0}...".format(table_path))
tries = 1
if table_obj is not None:
# RPC table object exists; proceed to use it to fetch data
while tries <= retries:
if query is None:
# no query passed. fetch whole table
df = conn.get_dataframe(table_obj, clean=clean)
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
elif isinstance(query, six.string_types):
# query object passed in. execute it
try:
df = conn.get_dataframe_by_querystring(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
else:
# query object passed in. execute it
try:
df = conn.get_dataframe_by_query(query)
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
else:
if isinstance(df, pd.core.frame.DataFrame):
logger.debug("Downloaded {0}...".format(table_path))
break
tries += 1
columns = table_obj.cols()
if columns:
if isinstance(df, pd.core.frame.DataFrame):
cols = [c['id'] for c in columns if c['id'] in df.columns.tolist()]
df = df[cols] # this ensures that the column order is as expected
else:
cols = [c['id'] for c in columns]
df = pd.DataFrame(columns=cols) # create empty dataframe with expected metadata/shape
else:
if not table_path.startswith('/'):
table_path = '/{}'.format(table_path)
table_result = None
while not table_result and tries <= retries:
tries += 1
try:
table_result = conn.analyze.table.table(project_id=project_id, table_path=table_path)
logger.debug("Downloaded {0}...".format(table_path))
break
except Exception as e:
logger.exception("Attempt {0}: Failed to download {1}: {2}".format(tries, table_path, e))
df = table_result_to_df(table_result or pd.DataFrame())
if not isinstance(df, pd.core.frame.DataFrame):
logger.exception('Table {0} failed to download!'.format(table_path))
elif len(df.columns) == 0:
logger.exception('Table {0} downloaded 0 records!'.format(table_path))
else:
if clean and query:
# Use the old cleaning process for things other than the full query.
clean_df = dh.clean_frame(df)
else:
clean_df = df
dfs.append({'df': clean_df, 'name': table_path})
return dfs
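# Illustrative call of download (added sketch; the table path is hypothetical and
# `existing_conn` is assumed to be a Connection created elsewhere). Each entry may carry
# a 'table_name', an optional 'query', and an optional 'table_object':
#
#     results = download([{'table_name': '/some/table', 'query': None, 'table_object': None}],
#                        configuration={'return_df': True}, conn=existing_conn)
#     for item in results:
#         print(item['name'], len(item['df']))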
def load(source_tables, fetch=True, cache_locally=False, configuration=None, conn=None, clean=False):
"""Load frame(s) from requested source, returning a list of dicts
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
"""
return_type = None
if type(source_tables) == list:
return_type = 'list'
elif type(source_tables) == str:
# single table (as string) passed... expecting to return full table
source_tables = [source_tables]
return_type = 'dataframe'
elif type(source_tables) == dict:
# single table (as dict) passed... likely with subsetting query, but not req'd
source_tables = [source_tables]
return_type = 'dataframe'
source_tables_proper = []
reassign = False
for s in source_tables:
if type(s) == str:
# convert list of strings into a list of dicts
reassign = True
d = {}
d['table_name'] = s
source_tables_proper.append(d)
if reassign:
# replace source_tables with reformatted version
source_tables = source_tables_proper
dfs = []
if fetch is True:
if not conn:
# create connection object
try:
rpc = Connect()
except:
logger.exception('Could not connect via RPC')
return False
conn = Connection(project=rpc.project_id)
for s in source_tables:
# create table objects if they don't exist
if s.get('table_object') == None:
s['table_object'] = Table(conn, s.get('table_name'))
downloads = download(source_tables, configuration=configuration, conn=conn, clean=clean)
for d in downloads:
df = d.get('df')
name_of_df = '{0}.psv'.format(d.get('name'))
if name_of_df.startswith('/'):
name_of_df = name_of_df[1:]
if cache_locally is True:
with open(os.path.join(configuration['LOCAL_STORAGE'], name_of_df), 'w') as f:
save_typed_psv(df, f)
dfs.append(df)
else:
for s in source_tables:
source_table = '{0}.psv'.format(s.get('table_name'))
source_path = os.path.join(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
if return_type == 'dataframe':
return dfs[0]
else:
return dfs
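# Illustrative calls of load (added sketch; table names are hypothetical and `config` /
# `existing_conn` are assumed to exist). A single table name returns one DataFrame, while
# a list returns a list of DataFrames:
#
#     df = load('/some/table', configuration=config, conn=existing_conn)
#     df_list = load(['/table_a', '/table_b'], configuration=config, conn=existing_conn)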
def load_new(source_tables, sep='|', fetch=True, cache_locally=False, configuration=None, connection=None):
"""Load frame(s) from requested source
If local, will load from the typed_psv. If Analyze, then will load the analyze table.
TODO: Make it fetch from analyze table....really this should be assimilated with dwim once dwim works again.
TODO: Make it go to analyze and cache locally, if requested to do so.
"""
if connection:
configuration['project_id'] = connection.project_id
if fetch is True:
download(source_tables, configuration)
dfs = []
for source_table in source_tables:
_, table_name = posixpath.split(source_table)
source_path = '{}/{}.psv'.format(configuration['LOCAL_STORAGE'], source_table)
df = load_typed_psv(source_path)
dfs.append(df)
return dfs
def dtype_from_sql(sql):
"""Gets a pandas dtype from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas dtype equivalent of `sql`
"""
mapping = {
'boolean': 'bool',
'text': 'object',
'smallint': 'int16',
'integer': 'int32',
'bigint': 'int64',
'numeric': 'float64',
'timestamp': 'datetime64[s]',
'interval': 'timedelta64[s]',
'date': 'datetime64[s]',
'time': 'datetime64[s]',
}
return mapping.get(str(sql).lower(), None)
def sturdy_cast_as_float(input_val):
"""
    Force a value to be of type 'float'. Sturdy and unbreakable.
    Works like data_helpers.cast_as_float except that NaN is passed through and unparseable
    values return None, rather than being forced to 0.0. (A None input is still coerced to 0.0.)
"""
if input_val is None:
return 0.0
try:
        if np.isnan(input_val):
            return float('nan')
else:
try:
return float(input_val)
except ValueError:
return None
except:
try:
return float(input_val)
except ValueError:
return None
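# Behavior sketch for sturdy_cast_as_float (added for illustration):
#
#     sturdy_cast_as_float('1.5')          # -> 1.5
#     sturdy_cast_as_float(None)           # -> 0.0
#     sturdy_cast_as_float('abc')          # -> None
#     sturdy_cast_as_float(float('nan'))   # -> nan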
def converter_from_sql(sql):
"""Gets a pandas converter from a SQL data type
Args:
sql (str): The SQL data type
Returns:
str: the pandas converter
"""
mapping = {
'boolean': bool,
'text': str,
'smallint': int,
'integer': int,
'bigint': int,
#'numeric': float, #dh.cast_as_float,
#'numeric': dh.cast_as_float,
'numeric': sturdy_cast_as_float,
        'timestamp': pd.to_datetime,  # pd.datetime is deprecated; to_datetime parses strings
        'interval': pd.to_timedelta,  # intervals parse as timedeltas rather than datetimes
        'date': pd.to_datetime,
        'time': pd.to_datetime,
}
return mapping.get(str(sql).lower(), str(sql).lower())
def load_typed_psv(infile, sep='|', **kwargs):
""" Loads a typed psv into a pandas dataframe. If the psv isn't typed,
loads it anyway.
Args:
infile (str): The path to the input file
sep (str, optional): The separator used in the input file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
if isinstance(infile, six.string_types):
if os.path.exists(infile):
buf = open(infile, 'rb')
else:
logger.exception('File does not exist: {0}'.format(infile))
return False
else:
buf = infile
try:
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
names_and_types = [h.split(CSV_TYPE_DELIMITER) for h in header]
column_names = [n[0] for n in names_and_types]
try:
dtypes = {
name: dtype_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
dtypes = None
converters={}
#for name, sqltype in names_and_types:
#converter = converter_from_sql(sqltype)
#if converter:
#converters[name] = converter
try:
converters = {
name: converter_from_sql(sqltype)
for name, sqltype in names_and_types
}
except ValueError:
# Missing sqltype - looks like this is a regular, untyped csv.
# Let's hope that first line was its header.
converters = None
# This will start on the second line, since we already read the first line.
#return pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep)
na_values = [
#'', # This was here, and then commented out, and I'm putting it back in 20180824. ***
# # If it isn't here, we fail when attempting to import a delimited file of type 'numeric'
# # it is coming in as null/empty (e.g. the last record in the following set:)
# # LE::text|PERIOD::text|RC::text|MGCOA::text|VT::text|TP::text|FRB::text|FUNCTION::text|DCOV::numeric|LOCAL_CURRENCY::text|CURRENCY_RATE::numeric|DCOV_LC::numeric
# # LE_0585|2018_01|6019999|6120_NA|VT_0585|TP_NA|FRB_AP74358|OM|0.00031|EUR|0.8198|0.000254138
# # LE_0003|2018_07|CA10991|5380_EBITX|VT_9988|TP_NA|FRB_APKRA15|OM|-0.00115|INR|68.7297|-0.079039155
# # LE_2380|2017_08|AP92099|Q_5010_EBITX|VT_0585|TP_NA|FRB_AP92099|RE|99|||
'#N/A',
'#N/A N/A',
'#NA',
'-1.#IND',
'-1.#QNAN',
'-NaN',
'-nan',
'1.#IND',
'1.#QNAN',
'N/A',
'NA',
'NULL',
'NaN',
'n/a',
'nan',
'null'
]
parse_dates = []
if dtypes is not None:
for k, v in six.iteritems(dtypes):
dtypes[k] = v.lower()
#Handle inbound dates
#https://stackoverflow.com/questions/21269399/datetime-dtypes-in-pandas-read-csv
if 'datetime' in dtypes[k]:
dtypes[k] = 'object'
parse_dates.append(k)
try:
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, encoding='utf-8')
except ValueError:
#remove dtypes if we have converters instead:
for k in six.iterkeys(converters):
if k in list(dtypes.keys()):
dtypes.pop(k, None)
na_values.append('')
buf = open(infile, 'rb')
headerIO = six.BytesIO(buf.readline()) # The first line needs to be in a separate iterator, so that we don't mix read and iter.
header = next(csv.reader(headerIO, delimiter=sep)) # Just parse that first line as a csv row
df = pd.read_csv(buf, header=None, names=column_names, dtype=dtypes, sep=sep, na_values=na_values, keep_default_na=False, parse_dates=parse_dates, converters=converters, encoding='utf-8')
finally:
# A final note:
            # SURELY there's a more efficient and native pandas way of doing this, but I'll be damned if I could figure it out.
            # Pandas used to have an errors='coerce' option to force data type. It's no longer available here, it seems.
            # Forcing data type is NOT easy when the incoming text data is just sequential delimiters with no values or whitespace.
            # What we're doing now is still not w/o risk. There are use cases for setting empty to zero, which is what we're doing, and use cases to set
            # empty to null, which is probably what we SHOULD do, but for now we do it this way because we already have a battle-hardened dh.cast_as_float that
            # works this way. We should probably just call a different homegrown float caster that returns NaN or None (None being preferred) rather than 0.0 on exception.
# Mercy. This has been a pain.
# I guess if it was easy, Pandas wouldn't support the ability to send in your own converters.
pass
return df
finally:
if isinstance(infile, six.string_types):
buf.close()
#Otherwise leave it open, since this function didn't open it.
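# Illustrative round trip with load_typed_psv (added sketch; the path is hypothetical).
# Reading back a file written by save_typed_psv restores the column names and approximate
# dtypes from the {name}::{sql_type} header:
#
#     df = load_typed_psv('/tmp/example.psv')
#     # df.columns -> ['id', 'name', 'amount']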
def table_result_to_df(result):
"""Converts a SQL result to a pandas dataframe
Args:
result (dict): The result of a database query
Returns:
`pandas.DataFrame`: A dataframe representation of `result`
"""
meta = result['meta']
data = result['data']
columns = [m['id'] for m in meta]
dtypes = {
m['id']: dtype_from_sql(m['dtype'].lower())
for m in meta
}
df = pd.DataFrame.from_records(data, columns=columns)
try:
typed_df = df.astype(dtype=dtypes)
except:
"""
This is heavy-handed, but it had to be.
Something was tripping up the standard behavior, presumably relating to
handling of nulls in floats. We're forcing them to 0.0 for now, which is possibly
sketchy, depending on the use case, but usually preferred behavior.
Buyer beware.
"""
typed_df = df
for col in typed_df.columns:
if dtypes[col] == u'object':
typed_df[col] = list(map(dh.cast_as_str, typed_df[col]))
elif dtypes[col].startswith(u'float'):
typed_df[col] = list(map(dh.cast_as_float, typed_df[col]))
elif dtypes[col].startswith(u'int'): #detect any flavor of int and cast it as int.
typed_df[col] = list(map(dh.cast_as_int, typed_df[col]))
return typed_df
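# Illustrative input shape for table_result_to_df (added sketch with made-up values):
#
#     result = {
#         'meta': [{'id': 'id', 'dtype': 'bigint'}, {'id': 'name', 'dtype': 'text'}],
#         'data': [(1, 'a'), (2, 'b')],
#     }
#     df = table_result_to_df(result)   # DataFrame with int64 'id' and object 'name'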
def dwim_save(df, name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, saves a dataframe as an analyze table.
Otherwise saves it as a typed psv in localdir.
Args:
df (`pandas.DataFrame`): The dataframe to save
name (str): The name to save this dataframe as
localdir (str, optional): The local path to save the typed psv
lvl (str, optional): What level (project/model) the table should be
extension (str, optional): What file extension to give the output file
sep (str, optional): The separator to use in the output file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import save, save_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
save_fn = {
'model': save,
'project': save_project,
}[lvl]
save_fn(df, name)
except ImportError:
# We must not be on an app server, so save as typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
save_typed_psv(df, path, sep)
def dwim_load(name, localdir='/tmp', lvl='model', extension='txt', sep='|', **kwargs):
"""If we're on an app server, loads an analyze table.
Otherwise loads a typed psv from localdir.
Args:
name (str): The name of the table or file to load
localdir (str, optional): The path to the directory where the local file is stored
lvl (str, optional): The level (model/project) of the table to load
        extension (str, optional): The file extension of the local file
sep (str, optional): The separator used in the local file
"""
#TODO: for now we just ignore extra kwargs - we accept them to make it a
#little easier to convert old to_csvs and read_csvs. In the future, we
    #should probably pass as many of them as possible on to to_csv/read_csv -
#the only issue is we have to make sure the header stays consistent.
try:
from plaid.app.analyze.sandboxed_code.user.iplaid.frame import load, load_project
# We must be on the app server.
# TODO: change this when we change how iplaid works
load_fn = {
'model': load,
'project': load_project,
}[lvl]
return load_fn(name)
except ImportError:
# We must not be on an app server, so load from typed_psv
fname = '.'.join((name, extension))
if lvl == 'model':
path = os.path.join(localdir, fname)
else:
path = os.path.join(localdir, lvl, fname)
return load_typed_psv(path, sep)
def clean_uuid(id):
"""Removes any invalid characters from a UUID and ensures it is 32 or 36 characters
Args:
id (str): The ID to clean
Returns:
str: `id` with any invalid characters removed
"""
# !! WARNING: If you're calling this in new code, make sure it's really what you
# !! want. It used to remove dashes. That turned out to be a bad idea. Now
# !! it leaves dashes in.
#
# !! If you've found a bug related to dashes being left in, and this is
# !! being called on lookup, you should probably just remove the call to
# !! clean_uuid. Going forward, we don't remove dashes.
if id is None:
return None
name = six.text_type(id).lower()
valid_chars = '0123456789abcdef-'
cleaned_id = u''.join(n for n in name if n in valid_chars)
if '-' in cleaned_id:
if len(cleaned_id) != 36:
raise Exception("Could not clean id {}. Not 36 characters long.".format(id))
else:
if len(cleaned_id) != 32:
raise Exception("Could not clean id {}. Not 32 characters long.".format(id))
return cleaned_id
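# Behavior sketch for clean_uuid (added for illustration):
#
#     clean_uuid('123E4567-E89B-12D3-A456-426614174000')
#     # -> '123e4567-e89b-12d3-a456-426614174000' (lowercased, dashes preserved)
#     clean_uuid('123e4567e89b12d3a456426614174000')   # 32 chars, no dashes -> unchanged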
def clean_name(name):
"""
DEPRECATED: does nothing
Removes any invalid characters from a name and limits it to 63 characters
Args:
name (str): The name to clean
Returns:
str: The cleaned version of `name`
"""
return name
def clean_filename(name):
"""Remove '/' from a name
Args:
name (str): the filename to clean
Returns:
str: the cleaned version of `name`
"""
if name is None:
return None
# everything's fine except /
    return six.text_type(name).translate({ord('/'): None})
def describe(df):
"""Shorthand for df.describe()
Args:
df (`pandas.DataFrame`): The dataframe to describe
Returns:
summary: Series/DataFrame of summary statistics
"""
return df.describe()
def unique_values(df, column):
"""Returns unique values in the provided column
Args:
df (`pandas.DataFrame`): The DataFrame containing data
column (str): The column to find unique values in
Returns:
list: The unique values in the column
"""
return df[column].unique()
def count_unique(group_by, count_column, df):
"""Returns a count of unique items in a dataframe
Args:
group_by (str): The group by statement to apply to the dataframe
count_column (str): The column to count unique records in
df (`pandas.DataFrame`): The DataFrame containing the data
Returns:
int: The count of unique items in the specified column after grouping
"""
return df.groupby(group_by)[count_column].apply(lambda x: len(x.unique()))
def sum(group_by, df):
return df.groupby(group_by).sum()
def std(group_by, df):
return df.groupby(group_by).std()
def mean(group_by, df):
return df.groupby(group_by).mean()
def count(group_by, df):
return df.groupby(group_by).count()
def inner_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps only matches
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='inner')
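# Illustrative usage of the join helpers (added sketch; the frames are hypothetical).
# keep_columns limits which right-hand columns survive the merge:
#
#     left = pd.DataFrame({'key': [1, 2], 'a': ['x', 'y']})
#     right = pd.DataFrame({'key': [2, 3], 'b': ['m', 'n'], 'c': [10, 20]})
#     inner_join(left, right, left_on='key', keep_columns=['b'])
#     # -> one row (key == 2) with columns key, a, b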
def outer_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps data from both frames and matches up using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='outer')
def left_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from left frame and any matches in right using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='left')
def right_join(left_frame, right_frame, left_on, right_on=None, keep_columns=None):
"""Keeps all data from right frame and any matches in left using the on_columns
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
keep_columns (:type:`list` of :type:`str`, optional): A list of columns to keep in the result
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
right_cols = right_frame.columns
# optional arg to specify which columns in right table to keep
if keep_columns is not None:
drop_columns = set()
for col in right_cols:
if (col in keep_columns) or (col in right_on):
pass
else:
# column not being kept
drop_columns.add(col)
# exclude columns from right table
right_cols = right_cols.difference(drop_columns)
return pd.merge(left_frame, right_frame[right_cols], left_on=left_on, right_on=right_on, how='right')
def anti_join(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from left frame that is not found in right frame
Args:
left_frame (`pandas.DataFrame`): The first frame to join
right_frame (`pandas.DataFrame`): The second frame to join on
left_on (str): Which column in the left frame to join on
right_on (str, optional): Which column to join on in the right frame, if different from `left_on`
Returns:
`pandas.DataFrame`: A frame containing the results of the join
"""
if right_on is None:
right_on = left_on
if type(left_on) == str:
left_on = [left_on]
if type(right_on) == str:
right_on = [right_on]
indicator_status = False
indicator_name = '_merge'
left_cols = left_frame.columns
# avoid collision with pd generated indicator name
while not indicator_status:
if indicator_name in left_cols:
indicator_name = '_' + indicator_name
else:
indicator_status = True
df = pd.merge(left_frame, right_frame[right_on], how='left', left_on=left_on, right_on=right_on, indicator=indicator_name)
df = df[df[indicator_name] == 'left_only']
del df[indicator_name]
return df
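# Illustrative usage of anti_join (added sketch; the frames are hypothetical). Only left
# rows with no match in the right frame are returned:
#
#     left = pd.DataFrame({'key': [1, 2, 3]})
#     right = pd.DataFrame({'key': [2]})
#     anti_join(left, right, left_on='key')   # -> rows with key 1 and 3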
def compare(left_frame, right_frame, left_on, right_on=None):
"""Keeps all data from right frame and any matches in left using the on_columns"""
#20180420 PBB Is "compare" a good name for this, it's basically a right-join in SQL terms?
#20180420 MWR It's quite old legacy. Not sure this one has ever been used for anything. Perhaps
# we can just do away with it.
if right_on is None:
right_on = left_on
return pd.merge(left_frame, right_frame, left_on=left_on, right_on=right_on, how='outer')
def apply_rule(df, rules, target_columns=None, include_once=True, show_rules=False):
"""
    If include_once is True, then condition n+1 is only applied to records left over after condition n.
    Target column(s) are plural because we want to run this operation only once, even
    when multiple columns need to be set.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
rules (list): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
Returns:
pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_final = pd.DataFrame()
df['temp_index'] = df.index
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
# Establish new column(s) as blank columns.
for column in target_columns:
df[column] = ''
def exclude_matched(include, match):
"""
Exclude if matched, or if previously excluded
Please do not change the 'if match is True:' line to 'if match:'. It matters here.
"""
return False if match is True else include
rule_num = 0
for rule in rules:
rule_num = rule_num + 1
rule_condition = rule.get('condition')
# Find subset based on condition
if rule_condition is not None and rule_condition != '' and str(rule_condition) != 'nan':
try:
df_subset = df[df['include'] == True].query(rule_condition, engine='python')
print('subset length: {}'.format(len(df[df['include'] == True])))
if show_rules:
df_subset['rule_number'] = str(rule_num)
df_subset['rule'] = str(rule_condition)
except Exception as e:
# TODO update this. We should capture all exceptions in an exception table.
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule_condition format param from here
if show_rules:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(rule_num, rule_condition, e)
logger.exception('EXCEPTION {}'.format(error_msg))
else:
df_subset = df[df['include'] == True]
# Populate target columns as specified in split
for column in target_columns:
df_subset[column] = rule[column]
# need to find a way to flip the flag once data has been selected
if include_once:
# Exclude the records of the current split from exposure to
# subsequent filters.
#if statement handles edge case where df is empty and has no columns.
if 'temp_index' in df_subset.columns:
#refactor to be m*1 not m*n.
df_subset['match'] = True
df = lookup(
df,
df_subset,
left_on=['temp_index'],
right_on=['temp_index'],
keep_columns=['match']
)
df['include'] = list(map(exclude_matched, df['include'], df['match']))
del df['match']
# The way we're doing this allows multiple matches
# if include_once is false.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
df_final = pd.concat([df_final, df_subset])
print('length:{}'.format(len(df_subset)))
df_final.drop(columns=['temp_index', 'include'], inplace=True, errors='ignore')
return df_final
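# Illustrative rules structure for apply_rule (added sketch; the column names and conditions
# are hypothetical). Each rule pairs a query-style condition with values for the target columns:
#
#     rules = [
#         {'condition': "region == 'EU'", 'value': 'european'},
#         {'condition': "amount > 100", 'value': 'large'},
#     ]
#     result = apply_rule(df, rules, target_columns=['value'], include_once=True)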
def apply_rules(df, df_rules, target_columns=None, include_once=True, show_rules=False,
verbose=True, unmatched_rule='UNMATCHED', condition_column='condition', iteration_column='iteration',
rule_id_column=None, logger=logger):
"""
    If include_once is True, then condition n+1 is only applied to records left over after condition n.
    Target column(s) are plural because we want to run this operation only once, even
    when multiple columns need to be set.
Args:
df (pandas.DataFrame): The DataFrame to apply rules on
df_rules (pandas.DataFrame): A list of rules to apply
target_columns (list of str, optional): The target columns to apply rules on.
include_once (bool, optional): Should records that match multiple rules
be included ONLY once? Defaults to `True`
show_rules (bool, optional): Display the rules in the result data? Defaults to `False`
        verbose (bool, optional): Display the rules in the log messages? Defaults
            to `True`. The extra logging adds little overhead, so it is usually worth leaving
            on unless logging is disabled altogether.
        unmatched_rule (str, optional): Default rule to write in cases of records not matching any rule
        condition_column (str, optional): Column name containing the rule condition, defaults to 'condition'
        iteration_column (str, optional): Column name containing the rule iteration number, defaults to 'iteration'
        rule_id_column (str, optional): Column name containing the rule id, just set to index if not provided
logger (object, optional): Logger to record any output
Returns:
list of pandas.DataFrame: The results of applying rules to the input `df`
"""
target_columns = target_columns or ['value']
df_rules = df_rules.reset_index(drop=True)
if iteration_column not in df_rules.columns:
df_rules[iteration_column] = 1
df['include'] = True
df['log'] = ''
if show_rules is True:
df['rule_number'] = ''
df['rule'] = ''
df['rule_id'] = '' if rule_id_column else df.index
# Establish new column(s) as blank columns <i>if they do not already exist.</i>
for column in target_columns:
if column not in df.columns:
df[column] = ''
summary = []
iterations = list(set(df_rules[iteration_column]))
iterations.sort()
for iteration in iterations:
df['include'] = True
def write_rule_numbers(rule_num):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_num == '':
return str(index)
else:
return '{}, {}'.format(rule_num, str(index))
def write_rule_conditions(condition):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if condition == '':
return str(rule[condition_column])
else:
return '{}, {}'.format(condition, str(rule[condition_column]))
def write_rule_id(rule_id):
"""Need to allow for fact that there will be > 1 sometimes if we have > iteration."""
if rule_id == '':
return str(rule[rule_id_column])
else:
return '{}, {}'.format(rule_id, str(rule[rule_id_column]))
matches = [] # for use when include_once is False
index = 0
for index, rule in df_rules[df_rules[iteration_column] == iteration].iterrows():
# Find subset based on condition
df_subset = df[df['include'] == True]
input_length = len(df_subset)
if include_once is True and input_length == 0:
break
if verbose:
logger.debug('')
logger.debug('iteration:{} - rule:{} - {}'.format(iteration, index, rule[condition_column]))
if rule[condition_column] is not None and rule[condition_column] != '' and str(rule[condition_column]) != 'nan':
try:
df_subset = df_subset.query(rule[condition_column])#, engine='python')
if verbose:
logger.debug('{} - input length'.format(input_length))
if show_rules is True:
if include_once is True:
df.loc[list(df_subset.index), 'rule_number'] = list(map(write_rule_numbers, df.loc[list(df_subset.index), 'rule_number']))
df.loc[list(df_subset.index), 'rule'] = list(map(write_rule_conditions, df.loc[list(df_subset.index), 'rule']))
if rule_id_column:
df.loc[list(df_subset.index), 'rule_id'] = list(map(write_rule_id, df.loc[list(df_subset.index), 'rule_id']))
else:
df_subset['rule_number'] = df_subset.index
df_subset['rule'] = rule[condition_column]
if rule_id_column:
df_subset['rule_id'] = rule[rule_id_column]
except Exception as e:
df_subset = pd.DataFrame()
def add_message(log):
return '<{} ::: {}>'.format(e, log) # removed redundant rule[condition_column] param from format string
if show_rules is True:
df['log'] = list(map(add_message, df['log']))
error_msg = ' (rule_num {0}) {1} error: {2}'.format(index, rule[condition_column], e)
logger.exception('EXCEPTION {}'.format(error_msg))
# Populate target columns as specified in split
for column in target_columns:
if rule[column] not in ['nan', '', 'None', None]:
if include_once is True:
df.loc[list(df_subset.index), column] = rule[column]
else:
df_subset[column] = rule[column]
# The way we're doing this allows multiple matches if include_once is False.
# Future: MAY be a reason to allow first-in wins or last-in wins, or ALL win.
# MIKE look here.
# df_final = pd.concat([df_final, df_subset])
matched_length = len(df_subset)
if verbose:
logger.debug('{} - matched length'.format(matched_length))
if include_once:
# Exclude the records of the current split from exposure to subsequent filters.
df.loc[list(df_subset.index), 'include'] = False
else:
if matched_length > 0:
matches.append(df_subset)
summary_record = {
'row_num': index,
iteration_column: iteration,
'input_records': input_length,
'matched_records': matched_length,
}
summary_record.update(rule)
summary.append(summary_record)
if include_once is False:
if len(matches) > 0:
df = | pd.concat(matches) | pandas.concat |