| prompt (string, 19–1.03M chars) | completion (string, 4–2.12k chars) | api (string, 8–90 chars) |
|---|---|---|
# -*- coding:utf-8 -*-
# Deep Streaming Label Learning
# <NAME> adapted
from collections import defaultdict
import random
from sklearn import metrics
from model import _classifier, _classifier2, \
KnowledgeDistillation, _classifierBatchNorm, IntegratedDSLL, LossPredictionMod, MarginRankingLoss_learning_loss, _S_label_mapping
# _BP_ML,_DNN,IntegratedModel,_label_representation, _S_label_mapping, _S_label_mapping2,
from helpers import predictor_accuracy, precision_at_ks, predict, predict_integrated, \
print_predict, LayerActivations, modify_state_dict
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data import Dataset, TensorDataset
# import umap
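# NOTE: AsymmetricLoss, label_correlation_loss2, label_correlation_DIYloss, DIYloss,
# LambdaLoss, ActiveSVMLearner, ActiveForestLearner, ActiveForestLearnerWorstCase,
# active_random and IntegratedModel are used below but not imported in this file;
# they are assumed to be defined elsewhere in the project. Purely as an illustrative
# stand-in (not the project's implementation), an asymmetric multi-label loss in the
# spirit of Ridnik et al. (2021), matching how AsymmetricLoss is called here
# (reduce=False returns one loss value per sample), could look like:
class AsymmetricLoss(nn.Module):
    def __init__(self, gamma_neg=4, gamma_pos=1, clip=0.05, eps=1e-8, reduce=True):
        super().__init__()
        self.gamma_neg, self.gamma_pos = gamma_neg, gamma_pos
        self.clip, self.eps, self.reduce = clip, eps, reduce

    def forward(self, logits, targets):
        # sigmoid probabilities; negatives get a small probability shift (clip)
        p = torch.sigmoid(logits)
        p_neg = (1 - p + self.clip).clamp(max=1)
        # asymmetric focusing: easy negatives are down-weighted more strongly than positives
        loss_pos = targets * torch.log(p.clamp(min=self.eps)) * (1 - p) ** self.gamma_pos
        loss_neg = (1 - targets) * torch.log(p_neg.clamp(min=self.eps)) * p ** self.gamma_neg
        loss = -(loss_pos + loss_neg).sum(dim=1)  # per-sample loss over all labels
        return loss.mean() if self.reduce else loss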
class CustomDataset(Dataset):
def __init__(self, x_tensor,y_mapping_tensor, y_tensor):
self.x = x_tensor
self.y_mapping = y_mapping_tensor
self.y = y_tensor
def __getitem__(self, index):
return (self.x[index], self.y_mapping[index], self.y[index])
def __len__(self):
return len(self.x)
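# Usage sketch (hypothetical shapes): CustomDataset keeps the feature tensor, the
# mapped old-label tensor and the new-label tensor aligned, so a DataLoader yields
# (x, y_mapping, y) triples per batch, as consumed by make_train_DSLL further below.
# _ds = CustomDataset(torch.randn(8, 16), torch.zeros(8, 4), torch.zeros(8, 6))
# _dl = DataLoader(_ds, batch_size=4, shuffle=True)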
def train_new(model, train_X, train_Y, hyper_params):
print("Start training model with old labels")
train_X_tensor = torch.from_numpy(train_X).float()
train_Y_tensor = torch.from_numpy(train_Y).float()
train_data = TensorDataset(train_X_tensor, train_Y_tensor)
train_loader = DataLoader(dataset=train_data,
batch_size=hyper_params.batch_size,
shuffle=True,
num_workers=5
)
# loss_fn = nn.MultiLabelSoftMarginLoss()
loss_fn = AsymmetricLoss()
optimizer = optim.Adam(model.parameters(), weight_decay=0.00001, lr=0.0001)
model.train()
amount = 0  # number of mini-batches processed
epochs = hyper_params.label_dest_epoch
for _ in range(epochs):
for x, y in train_loader:
optimizer.zero_grad()
yhat = model(x)
loss = loss_fn(yhat, y)
# print(loss)
loss.backward()
optimizer.step()
amount+=1
print(f"Done training old labels {amount*hyper_params.batch_size} examples trained")
return model
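# Illustrative call (the model and the data names are placeholders supplied by the caller):
# base_model = _classifier(hyper_params)
# base_model = train_new(base_model, train_X_old, train_Y_old, hyper_params)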
def make_train_step(model, loss_fn, optimizer):
def train_step(x, y):
model.train()
optimizer.zero_grad()
yhat = model(x)
loss = loss_fn(yhat, y)
loss.backward()
optimizer.step()
return loss.item()
# Returns the function that will be called inside the train loop
return train_step
def make_eval_step(model, loss_fn, optimizer):
def train_step(x, y):
model.eval()
optimizer.zero_grad()
yhat = model(x)
loss = loss_fn(yhat, y)
return loss.item()
# Returns the function that will be called inside the train loop
return train_step
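# Both factories return a closure that processes a single batch; illustrative use:
# train_step = make_train_step(classifier, nn.MultiLabelSoftMarginLoss(), optimizer)
# batch_loss = train_step(x_batch, y_batch)   # float loss for this batch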
def observe_train_DSLL(hyper_params, classifier, training_losses, train_X, mapping_train_Y_new, train_Y_new, test_X,
mapping_test_Y_new, test_Y_new):
# print nicely keeping track of where we are at the loss
print('[%d/%d]Loss: %.3f' % (
hyper_params.currentEpoch + 1, hyper_params.classifier_epoch, np.mean(training_losses)))
if ((hyper_params.currentEpoch + 1) % 10 == 0) or (hyper_params.currentEpoch + 1 == hyper_params.classifier_epoch):
print('train performance')
pred_Y_train = predict_integrated(classifier, train_X, mapping_train_Y_new)
# print(mapping_train_Y_new[:10])
# print(train_X[:10])
# exit()
_ = print_predict(train_Y_new, pred_Y_train, hyper_params)
# if (((hyper_params.currentEpoch + 1) % 5 == 0) | (hyper_params.currentEpoch < 10)):
print('test performance')
pred_Y = predict_integrated(classifier, test_X, mapping_test_Y_new)
# test_Y_new may arrive as a numpy array or a torch tensor depending on the (random) active learning path
if isinstance(test_Y_new, np.ndarray):
measurements = print_predict(test_Y_new, pred_Y, hyper_params)
else:
measurements = print_predict(test_Y_new.numpy(), pred_Y, hyper_params)
return measurements
def observe_train(hyper_params, classifier, training_losses, train_X, train_Y, test_X, test_Y):
print('[%d/%d]Loss: %.3f' % (
hyper_params.currentEpoch + 1, hyper_params.classifier_epoch, np.mean(training_losses)))
if ((hyper_params.currentEpoch + 1) % 10 == 0) or (hyper_params.currentEpoch + 1 == hyper_params.classifier_epoch):
print('train performance')
pred_Y_train = predict(classifier, train_X)
_ = print_predict(train_Y, pred_Y_train, hyper_params)
if ((hyper_params.currentEpoch + 1) % 5 == 0) or (hyper_params.currentEpoch < 10):
print('test performance')
pred_Y = predict(classifier, test_X)
measurements = print_predict(test_Y, pred_Y, hyper_params)
def train_KD(hyper_params, train_X, train_Y):
print(f"train_KD\ninput dim: {hyper_params.KD_input_dim} output dim: {hyper_params.KD_output_dim}")
hyper_params.KD_input_dim = train_X.shape[1]
hyper_params.KD_output_dim = train_Y.shape[1]
classifier = KnowledgeDistillation(hyper_params)
if torch.cuda.is_available():
classifier = classifier.cuda()
optimizer = optim.Adam(classifier.parameters(), weight_decay=hyper_params.classifier_L2)
criterion = nn.MSELoss()
for epoch in range(hyper_params.KD_epoch):
losses = []
for i, sample in enumerate(train_X):
if torch.cuda.is_available():
inputv = Variable(torch.FloatTensor(sample)).view(1, -1).cuda()
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1).cuda()
else:
inputv = Variable(torch.FloatTensor(sample)).view(1, -1)
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1)
classifier.train()
optimizer.zero_grad()
output = classifier(inputv)
# print(output,labelsv)
# print(output.shape,labelsv.shape)
loss = criterion(output, labelsv)
loss.backward()
optimizer.step()
losses.append(loss.data.mean().item())
print('[%d/%d]Distillation Loss: %.3f' % (epoch + 1, hyper_params.KD_epoch, np.mean(losses)))
print('training complete')
return classifier
def train_integrated_model(hyper_params, KD_model, train_X, train_Y, mapping_train_Y_new, train_Y_new,
test_X, soft_test_Y, mapping_test_Y_new, test_Y_new):
hyper_params.classifier_input_dim = train_X.shape[1]
hyper_params.classifier_output_dim = train_Y_new.shape[1]
hyper_params.classifier_hidden1 = KD_model.state_dict()['W_m.0.weight'].shape[0]
hyper_params.KD_input_dim = train_X.shape[1]
hyper_params.KD_output_dim = hyper_params.classifier_hidden1
classifier_W_m = KD_model
classifier_W_m_dict = classifier_W_m.state_dict()
if torch.cuda.is_available():
integrated_model = IntegratedModel(hyper_params).cuda()
else:
integrated_model = IntegratedModel(hyper_params)
integrated_model_dict = integrated_model.state_dict()
classifier_W_m_dict = {k: v for k, v in classifier_W_m_dict.items() if k in integrated_model_dict}
integrated_model_dict.update(classifier_W_m_dict)
integrated_model.load_state_dict(integrated_model_dict, strict=False)
# for param in integrated_model.parameters():
# param.requires_grad = False
# mapping_model = torch.load('model/bestModel/10.31experiment/mapping_epoch6_64-00.5soft_0.5hard') ,'lr': 0.0001
# optimizer = torch.optim.Adam([
# {'params':integrated_model.W_m.parameters(), 'lr': 0.001},
# {'params':integrated_model.representation.parameters()},
# {'params':integrated_model.mapping_W.parameters()},
# ], weight_decay=hyper_params.classifier_L2)
optimizer = optim.Adam(integrated_model.parameters(), weight_decay=hyper_params.classifier_L2)
criterion = nn.MultiLabelSoftMarginLoss()
for epoch in range(hyper_params.classifier_epoch):
hyper_params.currentEpoch = epoch
losses = []
for i, sample in enumerate(train_X):
if torch.cuda.is_available():
inputv = Variable(torch.FloatTensor(sample)).view(1, -1).cuda()
labelsv = Variable(torch.FloatTensor(train_Y_new[i])).view(1, -1).cuda()
mapping_y_new = Variable(torch.FloatTensor(mapping_train_Y_new[i])).view(1, -1).cuda()
else:
inputv = Variable(torch.FloatTensor(sample)).view(1, -1)
labelsv = Variable(torch.FloatTensor(train_Y_new[i])).view(1, -1)
mapping_y_new = Variable(torch.FloatTensor(mapping_train_Y_new[i])).view(1, -1)
integrated_model.train()
optimizer.zero_grad()
output = integrated_model(inputv, mapping_y_new)
loss = criterion(output, labelsv) + label_correlation_loss2(output, labelsv)
loss.backward()
optimizer.step()
losses.append(loss.data.mean().item())
print('training complete')
return integrated_model
def train_classifier(hyper_params, train_X, train_Y, test_X, test_Y):
hyper_params.classifier_input_dim = train_X.shape[1]
hyper_params.classifier_output_dim = train_Y.shape[1]
if hyper_params.classifier_hidden2 == 0:
classifier = _classifier(hyper_params)
else:
classifier = _classifier2(hyper_params)
if torch.cuda.is_available():
classifier = classifier.cuda()
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!")
classifier = nn.DataParallel(classifier, device_ids=[0, 1])
optimizer = optim.Adam(classifier.parameters(), weight_decay=hyper_params.classifier_L2)
criterion = nn.MultiLabelSoftMarginLoss()
for epoch in range(hyper_params.classifier_epoch):
losses = []
classifier.train()
for i, sample in enumerate(train_X):
if torch.cuda.is_available():
inputv = Variable(torch.FloatTensor(sample)).view(1, -1).cuda()
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1).cuda()
else:
inputv = Variable(torch.FloatTensor(sample)).view(1, -1)
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1)
optimizer.zero_grad()
output = classifier(inputv)
loss = criterion(output, labelsv)
loss.backward()
optimizer.step()
losses.append(loss.data.mean().item())
print('training complete')
return classifier
def lossAdd(x, y):
loss1 = nn.MultiLabelSoftMarginLoss()
loss = loss1(x, y) + 0.5 * label_correlation_DIYloss(x, y)
return loss
def lossAddcorrelation(x, y):
loss1 = nn.MultiLabelSoftMarginLoss()
loss = loss1(x, y) + label_correlation_loss2(x, y)
return loss
def train_classifier_batch(hyper_params, train_X, train_Y, test_X, test_Y, train_loader):
hyper_params.classifier_input_dim = train_X.shape[1]
hyper_params.classifier_output_dim = train_Y.shape[1]
# hyper_params.model_name = 'classifier'
if hyper_params.batchNorm:
hyper_params.model_name = 'classifier-BatchNorm'
if hyper_params.classifier_hidden2 == 0:
classifier = _classifier(hyper_params)
else:
classifier = _classifier2(hyper_params)
if torch.cuda.is_available():
classifier = classifier.cuda()
optimizer = optim.Adam(classifier.parameters(), weight_decay=hyper_params.classifier_L2)
# optimizer_2 = optim.SGD([{'params': w1, 'lr': 0.1},
# {'params': w2, 'lr': 0.001}])
if hyper_params.loss == 'entropy':
criterion = nn.MultiLabelSoftMarginLoss()
elif hyper_params.loss == 'correlation':
criterion = label_correlation_loss2
elif hyper_params.loss == 'correlation_entropy':
criterion = lossAddcorrelation
elif hyper_params.loss == 'DIY':
criterion = DIYloss()
elif hyper_params.loss == 'DIY_entropy':
criterion = lossAdd
else:
print('please choose a loss function (MultiLabelSoftMarginLoss is the default)')
criterion = nn.MultiLabelSoftMarginLoss()
train_step = make_train_step(classifier, criterion, optimizer)
eval_step = make_eval_step(classifier, criterion, optimizer)
training_losses = []
# for each epoch
for epoch in range(hyper_params.classifier_epoch):
batch_losses = []
hyper_params.currentEpoch = epoch
if ((epoch+1) % 20 == 0) & hyper_params.changeloss:
losses = []
classifier.train()
for i, sample in enumerate(train_X):
if (i+1) % 10 == 0:
if torch.cuda.is_available():
inputv = Variable(torch.FloatTensor(sample)).view(1, -1).cuda()
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1).cuda()
else:
inputv = Variable(torch.FloatTensor(sample)).view(1, -1)
labelsv = Variable(torch.FloatTensor(train_Y[i])).view(1, -1)
output = classifier(inputv)
loss = criterion(output, labelsv) + label_correlation_loss2(output, labelsv)
optimizer.zero_grad()
loss.backward()
optimizer.step()
losses.append(loss.data.mean().item())
observe_train(hyper_params, classifier, losses, train_X, train_Y, test_X, test_Y)
print('\nchange loss:', np.mean(losses))
for x_batch, y_batch in train_loader:
x_batch = x_batch.to(hyper_params.device)
y_batch = y_batch.to(hyper_params.device)
loss = train_step(x_batch, y_batch)
batch_losses.append(loss)
training_loss = np.mean(batch_losses)
training_losses.append(training_loss)
observe_train(hyper_params, classifier, training_losses, train_X, train_Y, test_X, test_Y)
print('training complete')
return classifier
def make_train_DSLL(model, loss_fn, optimizer):
def train_step_DSLL(x, y_mapping, y):
# Sets model to TRAIN mode
model.train()
optimizer.zero_grad()
# Makes predictions
yhat, kd_mid, trans_mid, ss_mid = model(x,y_mapping)
loss = loss_fn(yhat, y)
# Computes gradients
loss.mean().backward()
# loss.backward()
# Updates parameters and zeroes gradients
optimizer.step()
return loss, kd_mid, trans_mid, ss_mid
# Returns the function that will be called inside the train loop
return train_step_DSLL
def make_eval_DSLL(model, loss_fn, optimizer):
def eval_step_DSLL(x, y_mapping, y):
# Sets model to EVAL mode
model.eval()
optimizer.zero_grad()
# Makes predictions
yhat, kd_mid, trans_mid, ss_mid = model(x,y_mapping)
loss = loss_fn(yhat, y)
return loss, kd_mid, trans_mid, ss_mid
# Returns the function that will be called inside the train loop
return eval_step_DSLL
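# Unlike make_train_step above, these closures also return the intermediate
# activations (kd_mid, trans_mid, ss_mid) of the IntegratedDSLL model, which later
# feed the loss prediction module. Illustrative use:
# train_step_DSLL = make_train_DSLL(classifier, AsymmetricLoss(reduce=False), optimizer)
# loss, kd_mid, trans_mid, ss_mid = train_step_DSLL(x_batch, y_mapping, y_batch)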
################################################################## DSLL ###################################################################
def train_DSLL_model(hyper_params, featureKD_model, train_X, train_Y, mapping_train_Y_new, train_Y_new, test_X, mapping_test_Y_new, test_Y_new, train_loader, use_al):
# if use_al is True there is no DataLoader: train_loader is then a tuple combining the seed and the pool, each consisting of three tensors
hyper_params.classifier_input_dim = train_X.shape[1]
hyper_params.classifier_output_dim = train_Y.shape[1]
device = hyper_params.device
hyper_params.model_name = 'DSLL'
# create new classifier
classifier = IntegratedDSLL(hyper_params)
# copy weight information from KnowledgeDistillation 1st layer to IntegratedDSLL first layer
classifier_W_m = featureKD_model
classifier_W_m_dict = classifier_W_m.state_dict()
classifier_dict = classifier.state_dict()
classifier_W_m_dict = {k: v for k, v in classifier_W_m_dict.items() if k in classifier_dict}
classifier_dict.update(classifier_W_m_dict)
classifier.load_state_dict(classifier_dict, strict=False)
if torch.cuda.is_available():
classifier = classifier.cuda()
# optimizer = optim.RMSprop(classifier.parameters())
optimizer = torch.optim.Adam([
{'params':classifier.W_m.parameters()}, # , 'lr': 0.0001},
{'params':classifier.seniorStudent.parameters()},
{'params':classifier.transformation.parameters()},
], weight_decay=hyper_params.classifier_L2, lr = 0.001)
if hyper_params.loss == 'entropy':
criterion = nn.MultiLabelSoftMarginLoss()
elif hyper_params.loss == 'correlation':
criterion = label_correlation_loss2
elif hyper_params.loss == 'correlation_entropy':
criterion = lossAddcorrelation
elif hyper_params.loss == 'DIY':
criterion = DIYloss()
elif hyper_params.loss == 'DIY_entropy':
criterion = lossAdd
else:
#print('please choose loss function (MultiLabelSoftMarginLoss is default)')
# criterion = nn.MultiLabelSoftMarginLoss(reduction ='none')
criterion = AsymmetricLoss(reduce=False)
# train_step = make_train_DSLL(classifier, criterion, optimizer)
eval_step = make_eval_DSLL(classifier, criterion, optimizer)
# lp_criterions = {"ndcg": approxNDCGLoss(), "lambda":LambdaLoss(), "listnet": ListNetLoss(), "listMLE": ListMLELoss(), \
# "RMSE":RMSELoss(), "rank":RankLoss(),"mapranking":MapRankingLoss(),"spearman":SpearmanLoss()}
lp_criterion = MarginRankingLoss_learning_loss()
classifier_lpm = LossPredictionMod(hyper_params)
# optimizer2 = optim.Adam(classifier_lpm.parameters(), weight_decay=hyper_params.classifier_L2)
optimizer2 = torch.optim.Adam([
{'params':classifier_lpm.Fc1.parameters()}, # , 'lr': 0.0001},
{'params':classifier_lpm.Fc2.parameters()},
{'params':classifier_lpm.Fc3.parameters()},
{'params':classifier_lpm.fc_concat.parameters()},
], weight_decay=hyper_params.classifier_L2, lr=0.001)
if torch.cuda.is_available():
classifier_lpm = classifier_lpm.cuda()
# if ac:
activation = {}
def get_activation(name):
# print("INN")
def hook(model, input, output):
activation[name] = output.detach()
print("INN")
print(output.detach())
return hook
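# Illustrative use of the hook above (W_m is one of the IntegratedDSLL sub-modules
# referenced in the optimizer groups; remove() detaches the hook again):
# handle = classifier.W_m.register_forward_hook(get_activation('W_m'))
# _ = classifier(x_batch, y_mapping)   # activation['W_m'] now holds the layer output
# handle.remove()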
training_losses, full_losses, full_measurements = [], [], []
# for each epoch
x_axis, ndcg_saved = [], []
for epoch in range(hyper_params.classifier_epoch):
batch_losses = []
hyper_params.currentEpoch = epoch
for x_batch, y_mapping, y_batch in train_loader:
x_batch = x_batch.to(device)
y_mapping = y_mapping.to(device)
y_batch = y_batch.to(device)
batch_size = x_batch.shape[0]
# loss, kd_mid, trans_mid, ss_mid = train_step(x_batch, y_mapping, y_batch)
classifier.train()
optimizer.zero_grad()
optimizer2.zero_grad()
yhat, kd_mid, trans_mid, ss_mid = classifier(x_batch,y_mapping)
loss = criterion(yhat, y_batch)
loss.mean().backward()
optimizer.step()
# training_losses.append(loss.mean().detach())
# loss_predicted_loss = classifier_lpm(kd_mid, trans_mid, ss_mid)
# if epoch <= 1:
# loss2 = lp_criterion(loss_predicted_loss, loss.unsqueeze(1))
# calc_loss = loss.mean() + loss2
# calc_loss.backward()
# optimizer2.step()
# optimizer.step()
# else:
# calc_loss = loss.mean()
# calc_loss.backward()
# optimizer.step()
# print(loss)
# exit()
batch_losses.append(loss.mean().item()) #.mean()
# print(kd_mid, trans_mid, ss_mid)
# copy weights from IntegratedDSLL model to loss prediction model
# classifier.eval()
if epoch <= 10 and len(batch_losses) % 5 == 0:
measurements = observe_train_DSLL(hyper_params, classifier, training_losses, train_X, mapping_train_Y_new, train_Y_new, test_X,
mapping_test_Y_new, test_Y_new)
if measurements != None:
full_measurements.append(measurements)
# how many epochs does the loss module train
if epoch <= 1:
optimizer.zero_grad()
optimizer2.zero_grad()
try:
# Makes predictions detach old loss function (only update over loss prediction module)
kd_mid, trans_mid, ss_mid = kd_mid.detach(), trans_mid.detach(), ss_mid.detach()
predicted_loss = classifier_lpm(kd_mid, trans_mid, ss_mid)
loss2 = lp_criterion(predicted_loss, loss.unsqueeze(1).detach())
loss3 = loss.mean().detach() + loss2
# Computes gradients and updates model
loss3.backward()
optimizer2.step()
except Exception as err:
print(f"loss prediction module update failed, skipping this batch: {err}")
optimizer2.zero_grad()
# if len(batch_losses) % 1 == 0:
# print(predicted_loss,loss.unsqueeze(1))
# print(f"loss prediction loss: {loss3}")
#### TRAIN NDCG #####
# true loss number is number in row (first is highest)
# ndcg_true = np.asarray(loss.unsqueeze(1).cpu().detach().numpy())
# ndcg_seq = sorted(ndcg_true)
# ndcg_index = np.asarray([ndcg_seq.index(v) for v in ndcg_true])[..., np.newaxis]
# compare rank with score higher score is higher confidence so needs to match true loss rank
# ndcg_score = np.asarray(predicted_loss.cpu().detach().numpy())
# right size for the ndcg is (1,batch_size)
# ndcg_index.resize(1,batch_size)
# ndcg_score.resize(1,batch_size)
# ndcg at half of batch size
# batch_ndcg = metrics.ndcg_score(ndcg_index,ndcg_score, k=int(batch_size/2))
# TRAIN NDCG SAVED MOVED FOR TEST
# ndcg_saved.append(batch_ndcg)
# print(f"batch NDCG real score: {batch_ndcg}")
##### TEST NDCG #####
# classifier.eval()
# test_loss ,kd_mid_test, trans_mid_test, ss_mid_test = eval_step( torch.from_numpy(test_X).float().to(device),\
# torch.from_numpy(mapping_test_Y_new).float().to(device),\
# torch.from_numpy(test_Y_new).float().to(device))
# kd_mid_test, trans_mid_test, ss_mid_test = kd_mid_test.detach(), trans_mid_test.detach(), ss_mid_test.detach()
# predicted_loss_test = classifier_lpm(kd_mid_test, trans_mid_test, ss_mid_test)
# # print(f"Test loss size: {test_loss.shape[0]}, with mean of {test_loss.mean()}")
# ndcg_true = np.asarray(test_loss.unsqueeze(1).cpu().detach().numpy())
# ndcg_seq = sorted(ndcg_true)
# ndcg_index = np.asarray([ndcg_seq.index(v) for v in ndcg_true])[..., np.newaxis]
# # compare rank with score higher score is higher confidence so needs to match true loss rank
# ndcg_score = np.asarray(predicted_loss_test.cpu().detach().numpy())
# # right size for the ndcg is (1,batch_size)
# ndcg_index.resize(1,test_loss.shape[0])
# ndcg_score.resize(1,test_loss.shape[0])
# # ndcg at 10 percent of test size
# test_ndcg = metrics.ndcg_score(ndcg_index,ndcg_score, k=int(test_loss.shape[0]*0.1))
# ndcg_saved.append(test_ndcg)
# print(f"Test NDCG: {test_ndcg}")
# classifier.train()
full_losses.append(batch_losses)
training_loss = np.mean(batch_losses)
training_losses.append(training_loss)
# measurements2 = observe_train_DSLL(hyper_params, classifier, training_losses, train_X, mapping_train_Y_new, train_Y_new, test_X,
# mapping_test_Y_new, test_Y_new)
if measurements != None:
print(measurements)
full_measurements.append(measurements)
# print(full_losses)
# print losses figure
# [F1_Micro, F1_Macro, AUC_micro, AUC_macro]
F1_Micro = np.hstack(np.array(full_measurements)[:,0])
F1_Macro = np.hstack(np.array(full_measurements)[:,1])
AUC_micro = np.hstack(np.array(full_measurements)[:,2])
AUC_macro = np.hstack(np.array(full_measurements)[:,3])
# print(f"{F1_Micro}\n{F1_Macro}\n{AUC_micro}\n{AUC_macro}")
x_axis = [i for i in range(len(F1_Micro))]
import matplotlib.pyplot as plt
plt.plot(x_axis, F1_Micro, label='F1_Micro')
plt.plot(x_axis, F1_Macro, label='F1_Macro')
plt.plot(x_axis, AUC_micro, label='AUC_micro')
plt.plot(x_axis, AUC_macro, label='AUC_macro')
plt.xlabel('Instances', fontsize=18)
plt.ylabel('Values', fontsize=16)
# plt.ylim(0, 1)
# from scipy.stats import linregress
# slope, intercept, r_value, p_value, std_err = linregress(x_axis, F1_Micro)
# print(f"Slope is: {slope*1000}")
# print(f"Mean NDCG: {np.mean(F1_Micro)}")
# plt.plot(np.unique(x_axis), np.poly1d(np.polyfit(x_axis, F1_Micro, 1))(np.unique(x_axis)))
plt.legend(bbox_to_anchor=(1,1), loc="upper left")
plt.show()
# exit()
print('training complete')
return classifier
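# Typical call (argument names as used elsewhere in this file; use_al=False keeps the
# plain DataLoader path):
# classifier = train_DSLL_model(hyper_params, featureKD_model, train_X, train_Y,
#     mapping_train_Y_new, train_Y_new, test_X, mapping_test_Y_new, test_Y_new,
#     train_loader, use_al=False)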
def set_random_seed(seednr):
random.seed(seednr)
torch.manual_seed(seednr)
torch.cuda.manual_seed(seednr)
np.random.seed(seednr)
random.seed(seednr)
torch.backends.cudnn.enabled=False
torch.backends.cudnn.deterministic=True
def rmse(actual, pred):
actual, pred = np.array(actual), np.array(pred)
return np.sqrt(np.square(np.subtract(actual,pred)).mean())
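# e.g. rmse([1.0, 2.0, 3.0], [1.0, 2.0, 5.0]) == sqrt(4/3) ≈ 1.1547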
from sklearn.multioutput import ClassifierChain
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import jaccard_score, f1_score
from sklearn.linear_model import LogisticRegression
def classifier_chain(train_loader, test_ds):
xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, oldyseed, oldypool = train_loader
xseed_save, y_mappingseed_save, yseed_save, xpool_save, y_mappingpool_save, ypool_save, oldyseed_save, oldypool_save = xseed.numpy(), y_mappingseed.numpy(), yseed.numpy(), xpool.numpy(), y_mappingpool.numpy(), ypool.numpy(), oldyseed.numpy(), oldypool.numpy()
batch_size = len(xseed)
max_budget = int(len(xpool)/batch_size)
# how big is the active learning budget
# make the budget the same or half as the max
budget = int(max_budget//2)
xseed_save = np.concatenate((xseed_save,oldyseed_save),axis=1)
xpool_save = np.concatenate((xpool_save,oldypool_save),axis=1)
x_tensor_test, y_mapping_tensor_test, old_y_tensor_test, y_tensor_test = test_ds
x_tensor_test, y_mapping_tensor_test, old_y_tensor_test, y_tensor_test = x_tensor_test.numpy(), y_mapping_tensor_test.numpy(), old_y_tensor_test.numpy(), y_tensor_test.numpy()
cc_f1_scores = []
br_f1_scores = []
altypes = ['svm', 'random', 'rf'][:1]
for br_cc in ["BR"]: #"CC"
svm_al_learner = ActiveSVMLearner()
forest_al_learner = ActiveForestLearner()
xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, oldyseed, oldypool= xseed_save, y_mappingseed_save, yseed_save, xpool_save, y_mappingpool_save, ypool_save, oldyseed_save, oldypool_save
for altype in altypes:
for bud in range(budget):
print(bud)
if br_cc =="CC" :
base_lr = LogisticRegression()
chain = ClassifierChain(base_lr, order='random', random_state=10)
chain.fit(xseed, yseed)
Y_pred_ovr = chain.predict(x_tensor_test)
ovr_f1_score = f1_score(y_tensor_test, Y_pred_ovr, average='micro')
cc_f1_scores.append(ovr_f1_score)
# if altype == "svm":
chosen_indices = svm_al_learner.forward(torch.from_numpy(xseed), torch.from_numpy(y_mappingseed), torch.from_numpy(yseed), torch.from_numpy(xpool), torch.from_numpy(y_mappingpool), torch.from_numpy(ypool), batch_size)
# add selected items to the seed
xseed = np.concatenate((xseed,xpool[chosen_indices]),0)
y_mappingseed = np.concatenate((y_mappingseed,y_mappingpool[chosen_indices]),0)
yseed = np.concatenate((yseed,ypool[chosen_indices]),0)
# calculate which items need to be deleted from the pool (the ones that are chosen==new seed)
all_indices = np.arange(0, len(xpool))
non_chosen_items = list(set(all_indices) - set(chosen_indices))
# remove the seeds from the pool
xpool = xpool[non_chosen_items]
y_mappingpool = y_mappingpool[non_chosen_items]
ypool = ypool[non_chosen_items]
if br_cc == "BR":
base_lr = LogisticRegression()
ovr = OneVsRestClassifier(base_lr)
ovr.fit(xseed, yseed)
Y_pred_ovr = ovr.predict(x_tensor_test)
ovr_f1_score = f1_score(y_tensor_test, Y_pred_ovr, average='micro')
br_f1_scores.append(ovr_f1_score)
# if altype == "svm":
chosen_indices = svm_al_learner.forward(torch.from_numpy(xseed), torch.from_numpy(y_mappingseed), torch.from_numpy(yseed), torch.from_numpy(xpool), torch.from_numpy(y_mappingpool), torch.from_numpy(ypool), batch_size)
# add selected items to the seed
xseed = np.concatenate((xseed,xpool[chosen_indices]),0)
y_mappingseed = np.concatenate((y_mappingseed,y_mappingpool[chosen_indices]),0)
yseed = np.concatenate((yseed,ypool[chosen_indices]),0)
# calculate which items need to be deleted from the pool (the ones that are chosen==new seed)
all_indices = np.arange(0, len(xpool))
non_chosen_items = list(set(all_indices) - set(chosen_indices))
# remove the seeds from the pool
xpool = xpool[non_chosen_items]
y_mappingpool = y_mappingpool[non_chosen_items]
ypool = ypool[non_chosen_items]
# print(f1_scores)
import matplotlib.pyplot as plt
import seaborn as sns
plt.figure(figsize=(12, 7))
sns.set_style("darkgrid")
# define colours by hand (because easier to know what is happening)
colours = [["7215CC","008000", "00A000", "00C000", "00E000"], ["2CFF72","800000", "A00000", "C00000", "E00000"], ["D6D243","000080", "0000A0", "0000C0", "0000E0"], ["F74D7E","404000", "808000", "B0B000", "F0F000"], ["3E8DFF","404000", "808000", "B0B000", "F0F000"]]
x_axis = [i for i in range(len(br_f1_scores))]
# plt.plot(x_axis,cc_f1_scores)
plt.plot(x_axis,br_f1_scores)
plt.ylabel('F1', fontsize=16)
plt.xlabel('Instances', fontsize=18)
# plt.ylabel('Values', fontsize=16)
# from scipy.stats import linregress
# plt.title("Active learning with DSLL")
plt.title("F1 Classifier chain to real loss")
plt.legend(bbox_to_anchor=(1,1), loc="upper left")
plt.show()
exit()
################################################################## AL DSLL ##################################################################
def AL_train_DSLL_model(hyper_params, featureKD_model, train_X, train_Y, mapping_train_Y_new, train_Y_new, test_X, mapping_test_Y_new, test_Y_new, train_loader, test_ds, use_al, seednr):
# classifier_chain(train_loader, test_ds)
# if use_al is True there is no DataLoader: train_loader is then a tuple combining the seed and the pool, each consisting of three tensors
hyper_params.classifier_input_dim = train_X.shape[1]
hyper_params.classifier_output_dim = train_Y.shape[1]
device = hyper_params.device
hyper_params.model_name = 'DSLL'
from collections import defaultdict
training_losses, full_losses, full_measurements = [], [], defaultdict(list)
# base of active learning train with the seed then add from pool the best examples and train more
xseed_save, y_mappingseed_save, yseed_save, xpool_save, y_mappingpool_save, ypool_save, _, _ = train_loader
batch_size = len(xseed_save)
# print(f"BATCH SIZE IS: ########################## {batch_size}")
# print(batch_size)
# how many times (with different seed) do you want to run the random active learner (for smooth graph) leave at 5
# random_amount = 5
# svm and rf callable classes for the forward passes
svm_al_learner = ActiveSVMLearner()
forest_al_learner = ActiveForestLearner()
worstcasesvm_al_learner = ActiveForestLearnerWorstCase()
# which of the loss prediction module selection procedures you want to use
# choose{original, kmeans, distance}
# lpm_selection = "original" #"kmeans"
x_tensor_test, y_mapping_tensor_test, old_y_tensor_test, y_tensor_test = test_ds
# x_tensor_test, y_mapping_tensor_test, old_y_tensor_test, y_tensor_test = x_tensor_test.numpy(), y_mapping_tensor_test.numpy(), old_y_tensor_test.numpy(), y_tensor_test.numpy()
# for now altype can be worstcase, random, forest, svm, or lpm
# set the range for which active learning methods you want to see, now just altype is given in the DSLL file
# altypes = ['worstcase', 'svm', 'random', 'rf', 'lpm', 'lpm_pointwise', 'lpm_rankwise'][2:3] #4:5
# for altype in altypes:
altype = hyper_params.altype
# create new classifier
classifier = IntegratedDSLL(hyper_params)
# copy weight information from KnowledgeDistillation 1st layer to IntegratedDSLL first layer
classifier_W_m = featureKD_model
classifier_W_m_dict = classifier_W_m.state_dict()
classifier_dict = classifier.state_dict()
classifier_W_m_dict = {k: v for k, v in classifier_W_m_dict.items() if k in classifier_dict}
classifier_dict.update(classifier_W_m_dict)
classifier.load_state_dict(classifier_dict, strict=False)
if torch.cuda.is_available():
classifier = classifier.cuda()
optimizer = torch.optim.Adam([
{'params':classifier.W_m.parameters()}, # , 'lr': 0.0001},
{'params':classifier.seniorStudent.parameters()},
{'params':classifier.transformation.parameters()},
], weight_decay=hyper_params.classifier_L2, lr = float(hyper_params.classifier_lr))
print(float(hyper_params.classifier_lr))
# lp_criterions = {"ndcg": approxNDCGLoss(), "lambda":LambdaLoss(), "listnet": ListNetLoss(), "listMLE": ListMLELoss(), \
# "RMSE":RMSELoss(), "rank":RankLoss(),"mapranking":MapRankingLoss(),"spearman":SpearmanLoss()}
lpm_selection = hyper_params.lpm_selection #"kmeans" #"kmeans"
# main loss function
criterion = AsymmetricLoss(reduce=False)
# train_step = make_train_DSLL(classifier, criterion, optimizer)
# eval_step = make_eval_DSLL(classifier, criterion, optimizer)
# selects the way the loss is learned, so what the loss is of the lpm loss compared to the real classifier loss
if str(hyper_params.lpm_criterion) == "MSELOSS":
lp_criterion = nn.MSELoss()
elif str(hyper_params.lpm_criterion) == "PAIRWISE":
lp_criterion = MarginRankingLoss_learning_loss()
elif str(hyper_params.lpm_criterion) == "LISTWISE":
lp_criterion = LambdaLoss()
########################################################################### LOAD STATE DICT
# PATH = "models/lpm_models/lpm_model_mse_3to5_v1_1.pth"
classifier_lpm = LossPredictionMod(hyper_params)
# classifier_lpm.load_state_dict(torch.load(PATH))
# optimizer2 = optim.Adam(classifier_lpm.parameters(), weight_decay=hyper_params.classifier_L2)
optimizer2 = torch.optim.Adam([
{'params':classifier_lpm.Fc1.parameters()}, # , 'lr': 0.0001},
{'params':classifier_lpm.Fc2.parameters()},
{'params':classifier_lpm.Fc3.parameters()},
{'params':classifier_lpm.fc_concat.parameters()},
], weight_decay=hyper_params.classifier_L2, lr=float(hyper_params.lpm_lr))
print(hyper_params.lpm_lr)
classifier_lpm_old = LossPredictionMod(hyper_params)
# optimizer2 = optim.Adam(classifier_lpm.parameters(), weight_decay=hyper_params.classifier_L2)
# optimizer3 = torch.optim.Adam([
# {'params':classifier_lpm_old.Fc1.parameters()}, # , 'lr': 0.0001},
# {'params':classifier_lpm_old.Fc2.parameters()},
# {'params':classifier_lpm_old.Fc3.parameters()},
# {'params':classifier_lpm_old.fc_concat.parameters()},
# ], weight_decay=hyper_params.classifier_L2, lr=0.001)
if torch.cuda.is_available():
classifier_lpm = classifier_lpm.cuda()
classifier_lpm_old = classifier_lpm_old.cuda()
# reload the original (saved) values
xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool = xseed_save, y_mappingseed_save, yseed_save, xpool_save, y_mappingpool_save, ypool_save
# what is the max budget possible
max_budget = int(len(xpool)/batch_size)
# how big is the active learning budget
# make the budget the same or half as the max
budget = int(max_budget-1) #int(max_budget/2)
# for each epoch
x_axis, ndcg_saved = [], []
# do many tests for a smoother line TODO V0.3 make work for other active learners
# if altype == 'random':
# random_amount = 3 if altype == "random" else 2
# if altype == "random":
# random_amount = 1
# full_measurements[altype] = do_many_active(altype, budget, device, xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, hyper_params, training_losses, train_X, mapping_train_Y_new, train_Y_new, test_X,
# mapping_test_Y_new, test_Y_new, batch_size, random_amount, featureKD_model)
# continue
# kmeans initialized with the original xpool, the df2 contains its labels and value get deleted when chosen for seed
if lpm_selection == "kmeans":
# we can increase/decrease the batch_size multiplier between 2-4
# best results: multiplier:3, ncomponents:4
batch_size_multiplier = hyper_params.batch_size_multiplier
kmeans = KMeans(
init="random",
n_clusters=int(batch_size*batch_size_multiplier),
n_init=10,
max_iter=300,
random_state=42
)
# Standard scaler and dim reduction improve results
from sklearn.decomposition import PCA
pca = PCA(n_components=int(hyper_params.pca_comp))
scaler = StandardScaler()
scaled_features = scaler.fit_transform(xpool)
principalComponents = pca.fit_transform(scaled_features)
# principalComponents = umap.UMAP(metric='mahalanobis', n_neighbors=30, n_components=4).fit_transform(xpool.numpy())
kmeans.fit(principalComponents)
# df2 = pd.DataFrame(kmeans.labels_, columns = ['cluster'])
# create a dataframe where the first column is the cluster number per point
# and the second col is the list with values for each item
df2 = pd.DataFrame(kmeans.labels_, columns = ['cluster'])
df2["xpool"] = pd.Series(xpool.numpy().tolist())
df2["ymappingpool"] = pd.Series(y_mappingpool.numpy().tolist())
df2["ypool"] = pd.Series(ypool.numpy().tolist())
# add the pca locations of the points as a column
# df2['pca'] = pd.Series(principalComponents.tolist())
# exit()
# TODO not working yet! fix the distance
elif lpm_selection == "distance":
from sklearn.decomposition import PCA
pca = PCA(n_components=4)
scaler = StandardScaler()
scaled_features = scaler.fit_transform(xpool)
principalComponents = pca.fit_transform(scaled_features)
df2_pca = pd.DataFrame()
df2_pca['pca'] = pd.Series(principalComponents.tolist())
elif lpm_selection == "original":
df2 = pd.DataFrame()
# df2["xpool"] = pd.Series(xpool.numpy().tolist())
# df2["ymappingpool"] = pd.Series(y_mappingpool.numpy().tolist())
# df2["ypool"] = pd.Series(ypool.numpy().tolist())
set_random_seed(seednr)
lpm_loss_list =[]
lpm_error_list = []
# how many epochs need to happen
hyper_params.classifier_epoch = 1
for epoch in range(hyper_params.classifier_epoch):
batch_losses = []
hyper_params.currentEpoch = epoch
# loop over the max iterations of the budget
for bud in range(budget):
# break if the early stopping is reached in the DSLL file or command line
# if bud >= 50:#int(budget*float(hyper_params.early_stopping)):
# break
# just take the last batch_size items to train, these are selected out of the pool to train on
x_batch, y_mapping, y_batch = xseed[-batch_size:], y_mappingseed[-batch_size:], yseed[-batch_size:]
x_batch = x_batch.to(device)
y_mapping = y_mapping.to(device)
y_batch = y_batch.to(device)
batch_size = x_batch.shape[0]
# train with the seed and later the pool
classifier.train()
optimizer.zero_grad()
optimizer2.zero_grad()
yhat, kd_mid, trans_mid, ss_mid = classifier(x_batch,y_mapping)
loss = criterion(yhat, y_batch)
loss.mean().backward()
optimizer.step()
batch_losses.append(loss.mean().item()) #.mean()
print(f"Loss: {loss.mean()}")
# obtain measurements
# if epoch <= 10 : # and len(batch_losses) % 1 == 0
measurements = observe_train_DSLL(hyper_params, classifier, training_losses, train_X, mapping_train_Y_new, train_Y_new, x_tensor_test,
y_mapping_tensor_test, y_tensor_test) # test_X, mapping_test_Y_new, test_Y_new
if measurements != None:
full_measurements[altype].append(measurements)
############## ACTIVE LEARNING PART ##############
# use one of the active learning selection procedures
if altype == "random":
chosen_indices = active_random(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif altype == "worstcase":
# chosen_indices = active_worstcase(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
chosen_indices = worstcasesvm_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif altype == "svm":
chosen_indices = svm_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif altype == "rf":
chosen_indices = forest_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif altype == "lpm":
# Loss learning module
# train the loss prediction module with the seed
if epoch <= 1:
optimizer.zero_grad()
optimizer2.zero_grad()
# optimizer3.zero_grad()
try:
# Makes predictions detach old loss function (only update over loss prediction module)
kd_mid, trans_mid, ss_mid = kd_mid.detach(), trans_mid.detach(), ss_mid.detach()
predicted_loss = classifier_lpm(kd_mid, trans_mid, ss_mid)
loss2 = lp_criterion(predicted_loss, loss.unsqueeze(1).detach())
loss3 = loss.mean().detach() + loss2
# Computes gradients and updates model
loss3.backward()
optimizer2.step()
except Exception as err:
print(f"loss prediction module update failed, skipping this batch: {err}")
optimizer2.zero_grad()
# optimizer3.zero_grad()
# calculate the error between the predicted loss and the real loss of the test set
classifier.eval()
classifier_lpm.eval()
# get the model output of the test (no training)
yhat_test, kd_mid_test, trans_mid_test, ss_mid_test = classifier(x_tensor_test.to(device),y_mapping_tensor_test.to(device))
kd_mid_test, trans_mid_test, ss_mid_test = kd_mid_test.detach(), trans_mid_test.detach(), ss_mid_test.detach()
predicted_loss_test = classifier_lpm(kd_mid_test, trans_mid_test, ss_mid_test)
predicted_losses_array = np.asarray(predicted_loss_test.cpu().detach().numpy())
# print()
# y test to actual loss of lpm calculation
loss_test = criterion(yhat_test, y_tensor_test.to(device))
# calculate the RMSE between the actual and predicted pool loss
loss_test = loss_test.cpu().detach().numpy()
# resize only to drop the trailing singleton dimension (a list of one-element lists); otherwise the RMSE is less accurate
predicted_losses_array.resize(loss_test.shape[0],)
# calculate the rmse between the predicted lpm loss and the actual loss
lpm_error = rmse(predicted_losses_array,loss_test)
# append all the errors of each iteration of the budget to the list for visualization later on
lpm_error_list.append(lpm_error)
# predict best items to select in the pool
yhat_test, kd_mid_test, trans_mid_test, ss_mid_test = classifier(xpool.to(device), y_mappingpool.to(device))
kd_mid_test, trans_mid_test, ss_mid_test = kd_mid_test.detach(), trans_mid_test.detach(), ss_mid_test.detach()
predicted_loss_test = classifier_lpm(kd_mid_test, trans_mid_test, ss_mid_test)
predicted_losses_array = np.asarray(predicted_loss_test.cpu().detach().numpy())
optimizer.zero_grad()
optimizer2.zero_grad()
# def calc_distances(p0, points):
# return ((p0 - points)**2).sum(axis=1)
# # Next, here is a way to implement your algorithm using more numpy functions:
# def graipher(pts, K, indices):
# farthest_pts = np.zeros((K, 4))
# farthest_pts[0] = pts[np.random.randint(len(pts))]
# distances = calc_distances(farthest_pts[0], pts)
# for i in range(1, K):
# farthest_pts[i] = pts[np.argmax(distances)]
# distances = np.minimum(distances, calc_distances(farthest_pts[i], pts))
# return farthest_pts
# print(graipher(np.random.random_sample((20,4)), 3))
# exit()
if lpm_selection == "original":
# the original loss prediction module
# get the top 10 highest loss indices, 10 is here batch_size
top_10_loss_indices = np.argpartition(predicted_losses_array,-batch_size, axis=0)[-batch_size:]
# these are in a list of a list so unpack it
top_10_loss_indices.resize(batch_size,)
chosen_indices = top_10_loss_indices
elif lpm_selection == "kmeans":
# make a dataframe with two columns, the losses and cluster number of the items in the pool and sort them
# the index number is kept so we have the index of the highest loss items in the top
# df1, df2 = pd.DataFrame(predicted_losses_array, columns = ['losses']), pd.DataFrame(kmeans.labels_, columns = ['cluster'])
# df2 are the labels from the pool
df1 = pd.DataFrame(predicted_losses_array, columns = ['losses'])
df2.reset_index(drop=True, inplace=True)
df3 = pd.concat([df1, df2], axis=1)
df4 = df3.sort_values('losses',ascending=False)
chosen_indices, chosen_clusters = [], []
# choose the items if the cluster is not already represented (chosen)
for index, row in df4.iterrows():
# stop condition if we have reached the batch size
if len(chosen_indices) == batch_size:
break
# add to chosen indices if it is not in chosen_clusters (inherently from high loss to low)
if row['cluster'] not in chosen_clusters:
chosen_clusters.append(row['cluster'])
chosen_indices.append(int(index))
# make sure the index is the same in df2 and df4
assert int(df4.loc[int(index)].cluster) == int(df2.loc[int(index)].cluster)
# drop the chosen index from the pool
df2.drop(int(index), inplace=True)
elif lpm_selection == "distance":
# get the top batch_size * 4 highest loss indices
top_10_loss_indices = np.argpartition(predicted_losses_array,-batch_size, axis=0)[-batch_size:]
# these are in a list of a list so unpack it
chosen_indices = [i[0] for i in top_10_loss_indices]
df1 = pd.DataFrame(predicted_losses_array, columns = ['losses'])
df3 = pd.concat([df1, df2_pca], axis=1)
df4 = df3.sort_values('losses',ascending=False)
df4_top_indices = df4.index[:batch_size*4]
# TODO implement graipher
# print(poi)
# print(close_points)
# exit()
else:
print("no lpm selection measure given")
pass
# add selected items to the seed
xseed = torch.cat((xseed,xpool[chosen_indices]),0)
y_mappingseed = torch.cat((y_mappingseed,y_mappingpool[chosen_indices]),0)
yseed = torch.cat((yseed,ypool[chosen_indices]),0)
# calculate which items need to be deleted from the pool (the ones that are chosen==new seed)
all_indices = np.arange(0, len(xpool))
non_chosen_items = list(set(all_indices) - set(chosen_indices))
# remove the seeds from the pool
if 'xpool' in list(df2):
# print(type(ypool))
xpool2 = np.array(list(df2['xpool']), dtype=float)
ypool2 = np.array(list(df2['ypool']), dtype=float)
y_mappingpool2 = np.array(list(df2['ymappingpool']), dtype=float)
xpool = torch.from_numpy(xpool2).float()
ypool = torch.from_numpy(ypool2).float()
y_mappingpool = torch.from_numpy(y_mappingpool2).float()
# print(type(ypool))
# exit()
del xpool2
del ypool2
del y_mappingpool2
else:
xpool = xpool[non_chosen_items]
y_mappingpool = y_mappingpool[non_chosen_items]
ypool = ypool[non_chosen_items]
full_losses.append(batch_losses)
training_loss = np.mean(batch_losses)
training_losses.append(training_loss)
# print("MEASUREMENTSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSs")
# print(full_measurements[altype])
# print( np.hstack(np.array(full_measurements[f'{altype}'])[:,0]))
# print( np.hstack(np.array(full_measurements[f'{altype}'])[:,4]))
# # if measurements != None:
# # print(f"Measurements are: {measurements}")
# # full_measurements[altype].append(measurements)
# exit()
# print("hier")
if hyper_params.split_seed == 22:
labels = "11ij_6"
elif hyper_params.split_seed == 23:
labels = "ij6_11"
elif hyper_params.split_seed == 25:
labels = "611_ij"
else:
labels = "error"
# hyper_params.oldlist = old_list
# hyper_params.newlist = new_list
# print(measurements.shape)
################################################################ SAVE STATE DICT
# torch.save(classifier_lpm.state_dict(), PATH)
# with open(f'npresults/test_13d_old_mse_{hyper_params.batch_size}_{lpm_selection}_{altype}_{str(hyper_params.oldlist)}_{str(hyper_params.newlist)}.npy', 'wb') as f:
# np.save(f, lpm_error_list)
# prediction = predict_integrated(classifier, x_tensor_test, y_mapping_tensor_test)
# print(f"Old y shape: {old_y_tensor_test.shape}\nNew y shape: {y_tensor_test.shape}")
# print(prediction)
# print(type(prediction))
# print(str(hyper_params.newlist))
# return classifier
# hyper_params.pca_comp = kwargs['pca_dimensions']
# hyper_params.classifier_lr
# exit()
# xls = pd.ExcelFile('LEDAExport_20200224_verrijkt_2_voorPepijn.xlsx')
# df = pd.read_excel(xls, 'LEDA_20200224')
# exit()
# {hyper_params.classifier_lr}_{hyper_params.lpm_lr}
if hyper_params.dataset == "leda":
with open(f'npresults/test_13e_train1testother_{altype}_{lpm_selection}_{hyper_params.zero_multiplier}_{str(hyper_params.oldlist)}_{str(hyper_params.newlist)}.npy', 'wb') as f:
np.save(f, np.hstack(np.array(full_measurements[f'{altype}'])[:,0]))
else:
print(f"wrote to file {str(hyper_params.lpm_criterion)}_{hyper_params.batch_size}_{hyper_params.lpm_lr}_{hyper_params.classifier_lr}" )
with open(f'npresults/3test_yeast_hps_{str(hyper_params.lpm_criterion)}_{hyper_params.batch_size}_{hyper_params.lpm_lr}_{hyper_params.classifier_lr}_f1_auc_rec.npy', 'wb') as f:
np.save(f, np.array([np.hstack(np.array(full_measurements[f'{altype}'])[:,0]), np.hstack(np.array(full_measurements[f'{altype}'])[:,2]), np.hstack(np.array(full_measurements[f'{altype}'])[:,4]) ] ) )
return
exit()
import matplotlib.pyplot as plt
import seaborn as sns
fig = plt.figure(figsize=(12, 7))
sns.set_style("darkgrid")
# define colours by hand (because easier to know what is happening)
colours = [["7215CC","008000", "00A000", "00C000", "00E000"], ["2CFF72","800000", "A00000", "C00000", "E00000"], ["D6D243","000080", "0000A0", "0000C0", "0000E0"], ["F74D7E","404000", "808000", "B0B000", "F0F000"], ["3E8DFF","404000", "808000", "B0B000", "F0F000"]]
# loop over the al methods and calculate the measures
# print(full_measurements)
for e, i in enumerate(altypes):
F1_Micro = np.hstack(np.array(full_measurements[i])[:,0])
# F1_Macro = np.hstack(np.array(full_measurements[i])[:,1])
AUC_micro = np.hstack(np.array(full_measurements[i])[:,2])
# AUC_macro = np.hstack(np.array(full_measurements[i])[:,3])
# define x-axis as arange of the length of the array
x_axis = [i for i in range(len(F1_Micro))]
# plot the lines
plt.plot(x_axis, F1_Micro, label=f'F1_Micro_{i}',color = f'#{colours[e][0]}')
# plt.plot(x_axis, F1_Macro, label=f'F1_Macro_{i}',color = f'#{colours[e][1]}')
plt.plot(x_axis, AUC_micro, label=f'AUC_micro_{i}',color= f'#{colours[e][2]}')
# plt.plot(x_axis, AUC_macro, label=f'AUC_macro_{i}',color= f'#{colours[e][3]}')
# if you want to plot RMSE
# x_axis = [i for i in range(len(lpm_error_list))]
# plt.plot(x_axis,lpm_error_list)
# plt.ylabel('RMSE', fontsize=16)
plt.xlabel('Instances', fontsize=18)
plt.ylabel('Values', fontsize=16)
# from scipy.stats import linregress
plt.title("Active learning with DSLL")
# plt.title("RMSE lpm to real loss")
plt.legend(bbox_to_anchor=(1,1), loc="upper left")
fig.savefig('temp.png', dpi=fig.dpi)
# plt.show()
# exit()
print('training complete')
return classifier
def do_many_active(active_learner, budget, device, xseed_old, y_mappingseed_old, yseed_old, xpool_old, y_mappingpool_old, ypool_old, hyper_params, training_losses, train_X_old, mapping_train_Y_new_old, train_Y_new_old, test_X_old,
mapping_test_Y_new_old, test_Y_new_old, batch_size, max_seednr, featureKD_model, epochs =1):
full_measurements = defaultdict(list)
batch_losses = []
# max_seednr+=1
max_seednr = 1
# which selection procedure used
lpm_selection = "kmeans"
# max_seednr = random_amount
startseed = 10
for seednr in range(startseed, startseed+max_seednr):
worstcasesvm_al_learner = ActiveForestLearnerWorstCase()
for epoch in range(epochs):
set_random_seed(seednr)
# load in the old values
xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool = xseed_old, y_mappingseed_old, yseed_old, xpool_old, y_mappingpool_old, ypool_old
train_X, mapping_train_Y_new, train_Y_new, test_X, mapping_test_Y_new, test_Y_new = train_X_old, mapping_train_Y_new_old, train_Y_new_old, test_X_old, mapping_test_Y_new_old, test_Y_new_old
# reinitialise the active learners with new seed
# svm_al_learner = ActiveSVMLearner()
# forest_al_learner = ActiveForestLearner()
# create new classifier
classifier = IntegratedDSLL(hyper_params)
# copy weight information from KnowledgeDistillation 1st layer to IntegratedDSLL first layer
classifier_W_m = featureKD_model
classifier_W_m_dict = classifier_W_m.state_dict()
classifier_dict = classifier.state_dict()
classifier_W_m_dict = {k: v for k, v in classifier_W_m_dict.items() if k in classifier_dict}
classifier_dict.update(classifier_W_m_dict)
classifier.load_state_dict(classifier_dict, strict=False)
if torch.cuda.is_available():
classifier = classifier.cuda()
optimizer = torch.optim.Adam([
{'params':classifier.W_m.parameters()}, # , 'lr': 0.0001},
{'params':classifier.seniorStudent.parameters()},
{'params':classifier.transformation.parameters()},
], weight_decay=hyper_params.classifier_L2, lr = 0.0001)
# main loss function
criterion = AsymmetricLoss(reduce=False)
# train_step = make_train_DSLL(classifier, criterion, optimizer)
eval_step = make_eval_DSLL(classifier, criterion, optimizer)
lp_criterion = MarginRankingLoss_learning_loss()
classifier_lpm = LossPredictionMod(hyper_params)
# optimizer2 = optim.Adam(classifier_lpm.parameters(), weight_decay=hyper_params.classifier_L2)
optimizer2 = torch.optim.Adam([
{'params':classifier_lpm.Fc1.parameters()}, # , 'lr': 0.0001},
{'params':classifier_lpm.Fc2.parameters()},
{'params':classifier_lpm.Fc3.parameters()},
{'params':classifier_lpm.fc_concat.parameters()},
], weight_decay=hyper_params.classifier_L2, lr=0.0001)
if torch.cuda.is_available():
classifier_lpm = classifier_lpm.cuda()
# for x_batch, y_mapping, y_batch in train_loader:
for e in range(budget):
# just take the last batch_size items to train, these are selected out of the pool to train on
x_batch, y_mapping, y_batch = xseed[-batch_size:], y_mappingseed[-batch_size:], yseed[-batch_size:]
x_batch = x_batch.to(device)
y_mapping = y_mapping.to(device)
y_batch = y_batch.to(device)
batch_size = x_batch.shape[0]
# train with the seed and later the pool
classifier.train()
optimizer.zero_grad()
optimizer2.zero_grad()
yhat, kd_mid, trans_mid, ss_mid = classifier(x_batch,y_mapping)
loss = criterion(yhat, y_batch)
loss.mean().backward()
optimizer.step()
batch_losses.append(loss.mean().item()) #.mean()
# obtain measurements
# measurements is list of 4 items
# test_Y_new = torch.from_numpy(test_Y_new)
measurements = observe_train_DSLL(hyper_params, classifier, training_losses, train_X, mapping_train_Y_new, train_Y_new, test_X,
mapping_test_Y_new, test_Y_new)
# special case for the first item of the new seed: the first measurements list needs to be appended to the full lists
# the following items will just be added to this list. Numpy can actually add 2 lists so this is used
# for measurements we divide by the max number
# one list per seed, so budget amount of lists to which we add the other lists
if measurements != None and seednr == startseed:
measurements = np.array(measurements)/max_seednr
full_measurements[active_learner].append(measurements.tolist())
# numpy adds the values element-wise, whereas plain python lists would just be concatenated
elif measurements != None:
pythonlist = full_measurements[active_learner][e]
numpylist = np.array(pythonlist) + np.array(measurements)/max_seednr
full_measurements[active_learner][e] = numpylist.tolist()
# print(np.array(full_measurements[active_learner]).shape)
# print(e)
# print(full_measurements[active_learner][e])
# exit()
############## ACTIVE LEARNING PART ##############
if active_learner == "random":
chosen_indices = active_random(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif active_learner == "worstcase":
# chosen_indices = active_worstcase(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
chosen_indices = worstcasesvm_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
# chosen_indices = np.arange(int(batch_size)).tolist()
elif active_learner == "svm":
chosen_indices = svm_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif active_learner == "rf":
chosen_indices = forest_al_learner.forward(xseed, y_mappingseed, yseed, xpool, y_mappingpool, ypool, batch_size)
elif active_learner == "lpm":
# Loss learning module
# train the loss prediction module with the seed
# if epoch <= 1:
optimizer.zero_grad()
optimizer2.zero_grad()
# try:
# Makes predictions detach old loss function (only update over loss prediction module)
kd_mid, trans_mid, ss_mid = kd_mid.detach(), trans_mid.detach(), ss_mid.detach()
predicted_loss = classifier_lpm(kd_mid, trans_mid, ss_mid)
loss2 = lp_criterion(predicted_loss, loss.unsqueeze(1).detach())
loss3 = loss.mean().detach() + loss2
# Computes gradients and updates model
loss3.backward()
optimizer2.step()
# except:
# print("WEIRD ERROR")
optimizer2.zero_grad()
# predict best items to select in the pool
classifier.eval()
# get the model output of the xpools (no training)
_, kd_mid_test, trans_mid_test, ss_mid_test = classifier(xpool.to(device),y_mappingpool.to(device))
kd_mid_test, trans_mid_test, ss_mid_test = kd_mid_test.detach(), trans_mid_test.detach(), ss_mid_test.detach()
predicted_loss_test = classifier_lpm(kd_mid_test, trans_mid_test, ss_mid_test)
# # print(f"Test loss size: {test_loss.shape[0]}, with mean of {test_loss.mean()}")
# true_ranking = np.asarray(yhat_test.unsqueeze(1).cpu().detach().numpy())
predicted_losses_array = np.asarray(predicted_loss_test.cpu().detach().numpy())
if lpm_selection == "original":
# the original loss prediction module
# get the top 10 highest loss indices
top_10_loss_indices = np.argpartition(predicted_losses_array,-batch_size, axis=0)[-batch_size:]
# these are in a list of a list so unpack it
chosen_indices = [i[0] for i in top_10_loss_indices]
elif lpm_selection == "kmeans":
kmeans = KMeans(
init="random",
n_clusters=batch_size*4,
n_init=10,
max_iter=300,
random_state=42
)
scaler = StandardScaler()
scaled_features = scaler.fit_transform(xpool)
kmeans.fit(scaled_features)
# kmeans selection
# print(predicted_losses_array.shape[0])
# print(kmeans.labels_.shape[0])
# make sure that kmeans will work
assert predicted_losses_array.shape[0] == kmeans.labels_.shape[0]
# make a dataframe with two columns, the losses and cluster number of the items in the pool and sort them
# the index number is kept so we have the index of the highest loss items in the top
df1, df2 = pd.DataFrame(predicted_losses_array, columns = ['losses']), pd.DataFrame(kmeans.labels_, columns = ['cluster'])
df3 = pd.concat([df1, df2], axis=1)  # completion: pd.concat([df1, df2], axis=1) | api: pandas.concat
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 20 21:41:05 2021
@author: 78182
"""
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
def readucr(filename):
data = np.loadtxt(filename, delimiter="\t")
y = data[:, 0]
x = data[:, 1:]
return x, y.astype(int)
root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
classes = np.unique(np.concatenate((y_train, y_test), axis=0))
plt.figure()
for c in classes:
c_x_train = x_train[y_train == c]
plt.plot(c_x_train[0], label="class " + str(c))
plt.legend(loc="best")
plt.show()
plt.close()
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
num_classes = len(np.unique(y_train))
idx = np.random.permutation(len(x_train))
x_train = x_train[idx]
y_train = y_train[idx]
####using trade_example and df_joined
##Count the number of rows group by time_id
size = trade_example.groupby('time_id').size()
##The average group size is about 32; keep only time_ids with at least 20 trades
mask = size >= 20
mask = mask[mask==True]
true_id = mask.index.tolist()
##mask_trade_example now contains only the time_ids with at least 20 trades
mask_trade_example = trade_example[trade_example['time_id'].isin(true_id)]
###Now only get the last 20 timestamps
g = mask_trade_example.groupby('time_id')
tail_trade_example = g.tail(20)
tail_time_id = tail_trade_example['time_id'].unique()
tail_row_id = [f'0-{t_id}' for t_id in tail_time_id]
###prepare df_joined for prediction
df_joined['tag'] = df_joined['pred'] > df_joined['target']
mask_df_joined = df_joined[df_joined['row_id'].isin(tail_row_id)]
X = pd.DataFrame(tail_trade_example.groupby('time_id')['price'].apply(pd.Series.tolist).tolist())
y = mask_df_joined['tag']
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
num_classes = len(np.unique(y_train))
idx = np.random.permutation(len(X_train))
X_train = X_train[idx]
y_train = y_train[idx]
y_train[y_train == False] = 0
y_test[y_test == False] = 0
y_train[y_train == True] = 1
y_test[y_test == True] = 1
X_train = X_train.to_numpy()
X_test = X_test.to_numpy()
X_train = X_train.reshape((X_train.shape[0], X_train.shape[1], 1))
X_test = X_test.reshape((X_test.shape[0], X_test.shape[1], 1))
def make_model(input_shape):
input_layer = keras.layers.Input(input_shape)
conv1 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(input_layer)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.ReLU()(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv1)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.ReLU()(conv2)
conv3 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv2)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.ReLU()(conv3)
gap = keras.layers.GlobalAveragePooling1D()(conv3)
output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)
return keras.models.Model(inputs=input_layer, outputs=output_layer)
model = make_model(input_shape= X_train.shape[1:])
keras.utils.plot_model(model, show_shapes=True)
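# Sketch (not part of the original snippet): this first model is defined and plotted but never compiled
# or trained here. Training it would mirror the Keras block further below, roughly:
#   model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["sparse_categorical_accuracy"])
#   model.fit(X_train, y_train, batch_size=32, epochs=500, validation_split=0.2, verbose=1)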
############Training LSTM using last n records
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
import numpy as np
##1. Loading data
trade_example_all = pd.read_parquet('trade_train.parquet')
with open(r"df_joined.pickle", "rb") as input_file:
df_joined = pickle.load(input_file)
##2. Feature Engineering
trade_example_all['avg_order_size'] = trade_example_all['size']/trade_example_all['order_count']
#Remove extreme values of avg_order_size top 1%
mask = trade_example_all['avg_order_size'] < np.percentile(trade_example_all['avg_order_size'],99)
trade_example_all = trade_example_all[mask]
trade_example_all.reset_index(inplace=True)
##3. Preprocess
#3.1 Normalize column avg_order_size with min-max normalization
max_value = trade_example_all['avg_order_size'].max()
min_value = trade_example_all['avg_order_size'].min()
trade_example_all['avg_order_size'] = (trade_example_all['avg_order_size'] - min_value) / (max_value - min_value)
#Only keep last n records for every stock - time_id pair.
##Count the number of rows group by (stock_id,time_id)
size = trade_example_all.groupby(['stock_id','time_id']).size()
##Taking n = 20
last_n = 20
mask = size >= last_n
mask = mask[mask==True]
true_id = mask.index.tolist()
##mask_trade_example now contains time_id where trades are >= than 20.
trade_example_all_mask = pd.Series(list(zip(trade_example_all['stock_id'], trade_example_all['time_id']))).isin(true_id).values
trade_example_all_larger = trade_example_all[trade_example_all_mask]
##trade_example_all_larger now only contains trade record that have length >= 20.
##Get last n records
g = trade_example_all_larger.groupby(['stock_id','time_id'])
tail_trade_example = g.tail(last_n)
#Merging two columns
tail_trade_example["stock_id"] = tail_trade_example["stock_id"].astype(str)
tail_trade_example["time_id"] = tail_trade_example["time_id"].astype(str)
stock_id_list = tail_trade_example["stock_id"].tolist()
time_id_list = tail_trade_example["time_id"].tolist()
row_id = list(set([m+'-'+n for m,n in zip(stock_id_list,time_id_list)]))
###prepare df_joined for prediction
df_joined['tag'] = df_joined['pred'] > df_joined['target']
mask_df_joined = df_joined[df_joined['row_id'].isin(row_id)]
y = mask_df_joined['tag'] * 1
y = y.to_numpy(dtype=float)
#Extract 2-dimensional Xs.
X_price = pd.DataFrame(tail_trade_example.groupby(['stock_id','time_id'])['price'].apply(pd.Series.tolist).tolist())
X_aos = pd.DataFrame(tail_trade_example.groupby(['stock_id','time_id'])['avg_order_size'].apply(pd.Series.tolist).tolist())
#np.stack stacks the two feature matrices along a new axis: price first (on top), avg_order_size (X_aos) second
X = np.stack((X_price, X_aos), axis=1)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
num_classes = len(np.unique(y_train))
def make_model(input_shape):
input_layer = keras.layers.Input(input_shape)
conv1 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(input_layer)
conv1 = keras.layers.BatchNormalization()(conv1)
conv1 = keras.layers.ReLU()(conv1)
conv2 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv1)
conv2 = keras.layers.BatchNormalization()(conv2)
conv2 = keras.layers.ReLU()(conv2)
conv3 = keras.layers.Conv1D(filters=64, kernel_size=3, padding="same")(conv2)
conv3 = keras.layers.BatchNormalization()(conv3)
conv3 = keras.layers.ReLU()(conv3)
gap = keras.layers.GlobalAveragePooling1D()(conv3)
output_layer = keras.layers.Dense(num_classes, activation="softmax")(gap)
return keras.models.Model(inputs=input_layer, outputs=output_layer)
model = make_model(input_shape= X_train.shape[1:])
keras.utils.plot_model(model, show_shapes=True)
epochs = 500
batch_size = 32
callbacks = [
keras.callbacks.ModelCheckpoint(
"best_model.h5", save_best_only=True, monitor="val_loss"
),
keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.5, patience=20, min_lr=0.0001
),
keras.callbacks.EarlyStopping(monitor="val_loss", patience=50, verbose=1),
]
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
history = model.fit(
X_train,
y_train,
batch_size=batch_size,
epochs=epochs,
callbacks=callbacks,
validation_split=0.2,
verbose=1,
)
model = keras.models.load_model("best_model.h5")
test_loss, test_acc = model.evaluate(X_test, y_test)
print("Test accuracy", test_acc)
print("Test loss", test_loss)
#Get df_joined which contains y.
def realized_volatility(series_log_return):
return np.sqrt(np.sum(series_log_return**2))
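# log_return is used below but never defined in this snippet; a common definition (assumption,
# not taken from the original source) is the first difference of the log of the WAP series:
def log_return(series):
    return np.log(series).diff()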
def realized_volatility_per_time_id(file_path, prediction_column_name):
df_book_data = pd.read_parquet(file_path)
df_book_data['wap'] =(df_book_data['bid_price1'] * df_book_data['ask_size1']+df_book_data['ask_price1'] * df_book_data['bid_size1']) / (
df_book_data['bid_size1']+ df_book_data[
'ask_size1'])
df_book_data['log_return'] = df_book_data.groupby(['time_id'])['wap'].apply(log_return)
df_book_data = df_book_data[~df_book_data['log_return'].isnull()]
df_realized_vol_per_stock = pd.DataFrame(df_book_data.groupby(['time_id'])['log_return'].agg(realized_volatility)).reset_index()
df_realized_vol_per_stock = df_realized_vol_per_stock.rename(columns = {'log_return':prediction_column_name})
stock_id = file_path.split('=')[1]
df_realized_vol_per_stock['row_id'] = df_realized_vol_per_stock['time_id'].apply(lambda x:f'{stock_id}-{x}')
return df_realized_vol_per_stock[['row_id',prediction_column_name]]
def past_realized_volatility_per_stock(list_file,prediction_column_name):
df_past_realized = pd.DataFrame()
for file in list_file:
df_past_realized = pd.concat([df_past_realized,
realized_volatility_per_time_id(file,prediction_column_name)])
return df_past_realized
import glob
list_order_book_file_train = glob.glob('book_train.parquet/*')
df_past_realized_train = past_realized_volatility_per_stock(list_file=list_order_book_file_train,
prediction_column_name='pred')
train =
|
pd.read_csv('train.csv')
|
pandas.read_csv
|
from __future__ import print_function
import pandas as pd
import numpy as np
import os
from collections import OrderedDict
from pria_lifechem.function import *
from prospective_screening_model_names import *
from prospective_screening_metric_names import *
def clean_excel():
dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_backup.xlsx')
dataframe = dataframe.drop(dataframe.index[[8779]])
dataframe.to_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx', index=None)
def merge_prediction():
dataframe = pd.read_csv('../../dataset/fixed_dataset/pria_prospective.csv.gz')
molecule_ids = dataframe['Molecule'].tolist()
actual_labels = dataframe['Keck_Pria_AS_Retest'].tolist()
inhibits = dataframe['Keck_Pria_Continuous'].tolist()
complete_df = pd.DataFrame({'molecule': molecule_ids, 'label': actual_labels, 'inhibition': inhibits})
column_names = ['molecule', 'label', 'inhibition']
complete_df = complete_df[column_names]
dir_ = '../../output/stage_2_predictions/Keck_Pria_AS_Retest'
model_names = []
for model_name in model_name_mapping.keys():
file_path = '{}/{}.npz'.format(dir_, model_name)
if not os.path.exists(file_path):
continue
print('model: {} exists'.format(model_name))
data = np.load(file_path)
print(file_path, '\t', data.keys())
y_pred = data['y_pred_on_test'][:, 0]
if y_pred.ndim == 2:
y_pred = y_pred[:, 0]
model_name = model_name_mapping[model_name]
model_names.append(model_name)
complete_df[model_name] = y_pred
print()
model_names = sorted(model_names)
column_names.extend(model_names)
complete_df = complete_df[column_names]
print(complete_df.shape)
complete_df.to_csv('{}/complete_prediction.csv'.format(dir_), index=None)
return
def merge_prediction_old():
dataframe = pd.read_excel('../../output/stage_2_predictions/Keck_LC4_export.xlsx')
molecule_name_list = dataframe['Molecule Name'].tolist()
supplier_id = dataframe['Supplier ID'].tolist()
failed_id = ['F0401-0050', 'F2964-1411', 'F2964-1523']
inhibits = dataframe[
'PriA-SSB AS, normalized for plate and edge effects, correct plate map: % inhibition Alpha, normalized (%)'].tolist()
neo_dataframe = pd.read_csv('../../output/stage_2_predictions/pria_lc4_retest_may18.csv')
failed_molecule_names = neo_dataframe[neo_dataframe['Active'] == 0]['Row Labels'].tolist()
failed_molecule_names += ['SMSSF-0044356', 'SMSSF-0030688']
# materialize the filter/map chain so the membership test also works under Python 3
positive_enumerate = filter(lambda x: x[1] >= 35 and supplier_id[x[0]] not in failed_id and molecule_name_list[x[0]] not in failed_molecule_names, enumerate(inhibits))
positive_idx = set(map(lambda x: x[0], positive_enumerate))
actual_label = [1 if x in positive_idx else 0 for x in range(len(supplier_id))]
actual_label = np.array(actual_label)
complete_df = pd.DataFrame({'molecule name': molecule_name_list, 'molecule id': supplier_id, 'label': actual_label, 'inhibition': inhibits})
column_names = ['molecule name', 'molecule id', 'label', 'inhibition']
complete_df = complete_df[column_names]
test_data_df =
|
pd.read_csv('../../dataset/keck_lc4.csv.gz')
|
pandas.read_csv
|
from collections import defaultdict
import pandas as pd
import numpy as np
import logging
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut
from sklearn.utils import shuffle
from sklearn import metrics
from module import labeling
file = "./data/metadata/questionnaire.csv"
metadf = pd.read_csv(file, sep=";", index_col="name")
def _video_of(name, nrs):
return list(map(lambda nr: metadf.loc[name]["video%d"%nr], nrs))
classifiers = {
"RandomForest": RandomForestClassifier(
n_estimators=100),
"AdaBoost": AdaBoostClassifier(
n_estimators=100),
"SVC": SVC(gamma='auto'),
"MostFrequent": DummyClassifier(strategy="most_frequent"),
"Random": DummyClassifier(strategy="uniform"),
}
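# MostFrequent and Random are DummyClassifier baselines; they put the accuracy of the real
# classifiers above into context.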
labelConfig = labeling.SimpleConfig() #labeling.RankingThresholdConfig() # RankingThresholdConfig() SimpleConfig() OnlineConfig() ExpertConfig()
testlabelConfig = labeling.SimpleConfig() #labeling.RankingThresholdConfig() # RankingThresholdConfig() SimpleConfig() OnlineConfig()
videoCombis = [
[[1,3], [2,4]],
[[2,4], [1,3]],
[[1,4], [2,3]],
[[2,3], [1,4]],
#[[2,4],[2,4]],
#[[1,2,3,4],[1,2,3,4]]
]
def run_clf(clf, train_x, train_y, test_x, test_y, state):
logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)
clf.random_state = state
model = clf.fit(train_x, train_y)
test_yp = model.predict(test_x)
score = metrics.accuracy_score(test_y, test_yp)
cm = metrics.confusion_matrix(test_y, test_yp, labels=["tense", "relax"]) # NOTE: only works with these two labels!
verteilung_klassen_true = pd.Series(test_y).value_counts(normalize=True)
verteilung_klassen_pred = pd.Series(test_yp).value_counts(normalize=True)
tn_tense_true, fp_tense_false, fn_relax_false, tp_relax_true = cm.ravel() # tn, fp, fn, tp
info = {}
info["verteilung_klassen_true_relax"] = verteilung_klassen_true["relax"] if "relax" in verteilung_klassen_true else 0
info["verteilung_klassen_true_tense"] = verteilung_klassen_true["tense"] if "tense" in verteilung_klassen_true else 0
info["verteilung_klassen_pred_relax"] = verteilung_klassen_pred["relax"] if "relax" in verteilung_klassen_pred else 0
info["verteilung_klassen_pred_tense"] = verteilung_klassen_pred["tense"] if "tense" in verteilung_klassen_pred else 0
info["tn_tense_true"] = tn_tense_true
info["fp_tense_false"] = fp_tense_false
info["fn_relax_false"] = fn_relax_false
info["tp_relax_true"] = tp_relax_true
importances = {}
probas_df = None
if type(clf) is RandomForestClassifier:
importances = dict(zip(train_x.columns, model.feature_importances_))
test_yp_proba = model.predict_proba(test_x)
proba_classes = model.classes_
# test_x.index = list of the video segments
# test_yp_proba = list of tuples with the class probabilities
# proba_classes = order of the classes in each probability tuple
l = []
for proba in test_yp_proba:
l.append(dict(zip(proba_classes, proba)))
probas_df = pd.DataFrame(dict( zip(list(test_x.index), l))).transpose()
return score, cm, info, importances, probas_df
def run_class(data, splits):
logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)
cv_scores = {}
participants = np.array(sorted(set(map(lambda i: i.split("_")[0], data.index))))
train_index = splits[0]
test_index = splits[1]
per_participant_train_size_relax = []
per_participant_train_size_tense = []
per_participant_test_size_relax = []
per_participant_test_size_tense = []
info_df = None
importances_df = None
proba_dfs = []
videos_score = defaultdict(list)
for videos in videoCombis:
videos_train = videos[0]
videos_test = videos[1]
train_p = participants[train_index]
test_p = participants[test_index]
train_x = data.loc[data.index.map(lambda x: any(arg+"_" in x for arg in train_p))]
test_x = data.loc[data.index.map(lambda x: any(arg+"_" in x for arg in test_p))]
train_x = train_x.loc[train_x.index.map(lambda idx: any(video in idx for video in _video_of(idx.split("_")[0], videos_train) ))]
test_x = test_x.loc[test_x.index.map(lambda idx: any(video in idx for video in _video_of(idx.split("_")[0], videos_test) ))]
train_size = None
test_size = None
mean_score = defaultdict(list)
for state in range(10,110,10):
train_x_s = shuffle(train_x, random_state=state)
test_x_s = shuffle(test_x, random_state=state)
train_y = labeling.get_label(train_x_s, labelConfig)
test_y = labeling.get_label(test_x_s, testlabelConfig)
train_size = train_y.value_counts(normalize=False)
test_size = test_y.value_counts(normalize=False)
for clf_name, clf in classifiers.items():
score, cm, info, importances, probas_df = run_clf(clf, train_x_s, train_y, test_x_s, test_y, state)
mean_score[clf_name].append(score)
if clf_name == "RandomForest":
if info_df is None:
info_df = pd.DataFrame(info, index=['42',])
importances_df = pd.DataFrame([importances.values()], columns=importances.keys())
else:
info_df = info_df.append(info, ignore_index=True)
importances_df = importances_df.append(pd.DataFrame([importances.values()], columns=importances.keys()), ignore_index=True)
proba_dfs.append(probas_df)
per_participant_train_size_relax.append(train_size["relax"] if "relax" in train_size else 0)
per_participant_train_size_tense.append(train_size["tense"] if "tense" in train_size else 0)
per_participant_test_size_relax.append(test_size["relax"] if "relax" in test_size else 0)
per_participant_test_size_tense.append(test_size["tense"] if "tense" in test_size else 0)
for clf, scores in mean_score.items():
videos_score[clf].append(np.mean(scores))
#test_p[0], videos_test, np.mean(mean_score)
for clf, scores in videos_score.items():
cv_scores[clf] = np.mean(scores)
#print(test_p[0], clf, np.mean(scores))
result = {}
result["participant"] = participants[test_index]
result["scores"] = cv_scores
result["train_size_relax"] = np.mean(per_participant_train_size_relax)
result["train_size_tense"] = np.mean(per_participant_train_size_tense)
result["test_size_realx"] = np.mean(per_participant_test_size_relax)
result["test_size_tense"] = np.mean(per_participant_test_size_tense)
result["info"] = info_df.mean()
result["importances"] = importances_df.mean()
result["probas"] = pd.concat(proba_dfs).groupby(level=0).mean()
return result
class Classification():
def __init__(self):
logging.getLogger('distributed.utils_perf').setLevel(logging.CRITICAL)
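# Note: self.client (used in execute below) is assumed to be a dask.distributed Client that is
# attached to this object elsewhere; it is not created in this snippet.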
def execute(self, data):
print("Make Classification with ", labelConfig)
participants = np.array(sorted(set(map(lambda i: i.split("_")[0], data.index))))
splits = list(LeaveOneOut().split(participants))
datas = [data for i in range(len(splits))]
big_future_data = self.client.scatter(datas)
futures = self.client.map(run_class, big_future_data, splits)
all_cv_results = self.client.gather(futures)
cv_scores = defaultdict(list)
train_size_relax = []
train_size_tense = []
test_size_realx = []
test_size_tense = []
for dict_clf_score in all_cv_results:
for name, score in dict_clf_score["scores"].items():
cv_scores[name].append(score)
train_size_relax.append(dict_clf_score["train_size_relax"])
train_size_tense.append(dict_clf_score["train_size_tense"])
test_size_realx.append(dict_clf_score["test_size_realx"])
test_size_tense.append(dict_clf_score["test_size_tense"])
#for split in splits:
# dict_clf_score = run_class(split, data)
# for name, score in dict_clf_score.items():
# cv_scores[name].append(score)
results = {}
for name, scores in cv_scores.items():
print(name, np.mean(scores), np.std(scores))
results[name] = [np.mean(scores), np.std(scores)]
sizes = {}
sizes["train_size_relax"] = np.mean(train_size_relax)
sizes["train_size_tense"] = np.mean(train_size_tense)
sizes["test_size_realx"] = np.mean(test_size_realx)
sizes["test_size_tense"] = np.mean(test_size_tense)
#### BUG:
### The different classifiers were not taken into account for the info!
### Best to simply use only the RandomForest here
info_dataframe = None
for result in all_cv_results:
result["info"]["score"] = result["scores"]["RandomForest"]
if info_dataframe is None:
info_dataframe = pd.DataFrame(result["info"], columns=[result["participant"]]).transpose()
else:
info_dataframe = info_dataframe.append(pd.DataFrame(result["info"], columns=[result["participant"]]).transpose())
importances_dataframe = None
for result in all_cv_results:
if importances_dataframe is None:
importances_dataframe =
|
pd.DataFrame(result["importances"], columns=[result["participant"]])
|
pandas.DataFrame
|
#You do not talk about Fight Club
import os
os.chdir('....\\data\\input.csv')
#You DO NOT talk about Fight Club
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from fancyimpute import KNN
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
#Only two guys to a fight
train =
|
pd.read_csv('train.csv')
|
pandas.read_csv
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from itertools import product
import pandas as pd
from covsirphy.util.term import Term
from covsirphy.ode.mbase import ModelBase
from covsirphy.automl.autots_predictor import _AutoTSPredictor
class AutoMLHandler(Term):
"""
Predict ODE parameter values automatically with machine learning.
Args:
X (pandas.DataFrame):
Index
pandas.Timestamp: Observation date
Columns
observed variables (int or float)
Y (pandas.DataFrame):
Index
pandas.Timestamp: Observation date
Columns
observed ODE parameter values (float)
model (covsirphy.ModelBase): ODE model
days (int): days to predict
kwargs: keyword arguments of autots.AutoTS.
Note:
When X is an empty dataframe, only "univariate" can be used as @method with AutoMLHandler.predict().
"""
_LIKELY = "Likely"
_UPPER = "Upper"
_LOWER = "Lower"
def __init__(self, X, Y, model, days, **kwargs):
self._model = self._ensure_subclass(model, ModelBase, name="model")
self._X = self._ensure_dataframe(X, name="X", time_index=True, empty_ok=True)
self._Y = self._ensure_dataframe(Y, name="Y", time_index=True, empty_ok=False, columns=model.PARAMETERS)
self._days = self._ensure_natural_int(days, name="days")
self._kwargs = kwargs.copy()
self._pred_df = pd.DataFrame(columns=[self.SERIES, self.DATE, *Y.columns.tolist()])
def predict(self, method):
"""
Perform automated machine learning to predict values.
Args:
method (str): machine learning method name, "univariate" or "multivariate_regression"
Returns:
AutoMLHandler: self
Note:
Models used by "univariate" can be checked with from autots.models.model_list import model_lists; model_lists["univariate"].
Note:
Model used by "multivariate_regression" is Multivariate Regression.
"""
method_dict = {
"univariate": self._univariate,
"multivariate_regression": self._multivariate_regression,
}
if method not in method_dict:
raise KeyError(
f"Un-expected method: {method}. Supported methods are {', '.join(list(method_dict.keys()))}.")
self._register_scenarios(method, *method_dict[method]())
return self
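# Minimal usage sketch (assumption; the variable names are illustrative, not from this snippet):
#   handler = AutoMLHandler(X, Y, model=some_ode_model, days=30)
#   summary_df = handler.predict("univariate").summary()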
def summary(self):
"""
Create and summarize the scenarios.
Returns:
pandas.DataFrame:
Index
reset index
Columns
- Scenario (str): scenario name, "Univariate_Likely", "Univariate_1" etc.
- Start (pandas.Timestamp): start date of the phase
- End (pandas.Timestamp): end date of the phase
- Rt (float): phase-dependent reproduction number
- columns of Y data
Note:
"Univariate_Likely" scenario is the most likely scenario when univariate forcasting is used.
Note:
"Univariate_01" scenario is the created with upper values of ODE parameter values.
Note:
"Univariate_16" scenario is the created with lower values of ODE parameter values. (if the model has four parameters)
Note:
Dates with the same Rt values at the 1st decimal place will be merged to one phase.
"""
df = self._pred_df.copy()
# Calculate reproduction number to create phases
df["param"] = df[self._model.PARAMETERS].to_dict(orient="records")
df[self.RT] = df.apply(lambda x: self._model(population=100, **x["param"]).calc_r0(), axis=1).round(1)
# Get start/end date
criteria = [self.SERIES, self.RT]
df = df.groupby(criteria).first().join(df[[*criteria, self.DATE]].groupby(criteria).last(), rsuffix="_last")
df = df.rename(columns={self.DATE: self.START, f"{self.DATE}_last": self.END})
df = df.reset_index().loc[:, [self.SERIES, self.START, self.END, self.RT, *self._model.PARAMETERS]]
return df.sort_values([self.SERIES, self.START], ignore_index=True)
def _register_scenarios(self, method, likely_df, upper_df, lower_df):
"""
Create and register scenario with the most likely values, upper values and lower values.
Args:
method (str): machine learning method name
likely_df (pandas.DataFrame): the most likely values with a forecasting method
Index
Date (pandas.Timestamp): observation date
Columns
predicted values (float)
upper_df (pandas.DataFrame): the upper values with a forecasting method
Index
Date (pandas.Timestamp): observation date
Columns
predicted values (float)
lower_df (pandas.DataFrame): the lower values with a forecasting method
Index
Date (pandas.Timestamp): observation date
Columns
predicted values (float)
"""
# The most likely scenario
df = likely_df.loc[:, self._Y.columns]
df.index.name = self.DATE
df = df.reset_index()
df[self.SERIES] = f"{method.capitalize()}_{self._LIKELY}"
dataframes = [df]
# Upper/Lower
ul_df = upper_df.loc[:, self._Y.columns].join(
lower_df.loc[:, self._Y.columns], lsuffix=f"_{self._UPPER}", rsuffix=f"_{self._LOWER}")
col_products = product(
*([f"{param}_{suffix}" for suffix in (self._UPPER, self._LOWER)] for param in self._Y.columns))
for (i, col_product) in enumerate(col_products):
df = ul_df.loc[:, col_product]
df.rename(
columns=lambda x: x.replace(self._UPPER, "").replace(self._LOWER, "").replace("_", ""), inplace=True)
df[self.SERIES] = f"{method.capitalize()}_{i:02}"
df.index.name = self.DATE
dataframes.append(df.reset_index())
self._pred_df =
|
pd.concat([self._pred_df, *dataframes], axis=0)
|
pandas.concat
|
import glob
import dask
import pandas as pd
col_widths = [3, 4, 12, 4, 6, 6, 5, 7, 6, 6, 4]
cols = [
"basin",
"number",
"init",
"technum",
"tech",
"tau",
"lat",
"lon",
"vmax",
"mslp",
"cat"
]
def lat_lon_to_num(string):
value = pd.to_numeric(string[:-1], errors="coerce") / 10
if string.endswith(("N", "E")):
return value
else:
return -value
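# For example, given the conversion above: lat_lon_to_num("256N") -> 25.6 and lat_lon_to_num("903W") -> -90.3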
@dask.delayed()
def process_df(file):
df =
|
pd.read_fwf(file, widths=col_widths, names=cols, header=None, na_value="nan")
|
pandas.read_fwf
|
import numpy as np
import pandas as pd
from helpers import d2df
from helpers import polyvalHelperFunction
import CoolProp.CoolProp as cp
def operationalModeCalculator(processed, raw, CONSTANTS, hd):
# This function assigns to each point an operational mode based on a set of assumptions
# Initializing the data series
temp = pd.Series(index=processed.index)
temp[raw[hd["SHIP_SPEED_KNOT_"]] > 15] = "High Speed Sailing"
temp[(raw[hd["SHIP_SPEED_KNOT_"]] < 15) * (raw[hd["SHIP_SPEED_KNOT_"]] > 4)] = "Low Speed Sailing"
temp[((processed["Demands:Electricity:Thrusters:Edot"] > 0) + ((raw[hd["SHIP_SPEED_KNOT_"]] < 4) * (raw[hd["SHIP_SPEED_KNOT_"]] > 2)))] = "Maneuvering"
temp[temp.isnull()] = "Port/Stay"
processed["OperationalMode"] = temp
return processed
def seasonCalculator(processed):
# This function assigns to each point a season identifier based on a set of assumptions
temp = pd.Series(index=processed.index)
temp[processed["T_0"] < 280] = "Winter"
temp[processed["T_0"] >= 280] = "Mid-Season"
temp[processed["Demands:Electricity:HVAC:Edot"] > 0] = "Summer"
processed["Season"] = temp
return processed
def engineStatusCalculation(type, raw, processed, CONSTANTS, hd, dict_structure):
for system in CONSTANTS["General"]["NAMES"][type]:
processed[system + ":" + "on"] = raw[hd[system+"-TC__RPM_"]] > 5000
for unit in dict_structure["systems"][system]["units"]:
processed[system + ":" + unit + ":on"] = processed[system + ":" + "on"]
return processed
def engineLoadCalculation(type, raw, processed, CONSTANTS, hd):
for system in CONSTANTS["General"]["NAMES"][type]:
processed[system+":"+"load"] = processed[d2df(system,"Cyl","Power_out","Edot")] / CONSTANTS[type]["MCR"]
return processed
def bsfcISOCorrection(bsfc_ISO, charge_air_temp, charge_air_cooling_temp, fuel_temp, CONSTANTS):
# This function calculates the "real" BSFC starting from the ISO corrected one and from measurements of
# - Charge air temperature [K]
# - Charge air coolant temperature [K]
# - Fuel LHV [MJ/kg]
# - Mechanical efficiency (often assumed at 0.8)
# Assigning the value of the LHV depending on the fuel temperature
LHV = pd.Series(0,index=charge_air_temp.index)
LHV[fuel_temp < 70] = CONSTANTS["General"]["MDO"]["LHV"] # If T_fuel<70, it is Diesel
LHV[fuel_temp >= 70] = CONSTANTS["General"]["HFO"]["LHV"] # If T_fuel>70, it is HFO
# Converting the inputs (expected as pandas Series) to numpy arrays
if isinstance(charge_air_temp,pd.Series):
T_ca = charge_air_temp.values
else:
print("Error: Expecting a pandas data series as data type")
if isinstance(charge_air_cooling_temp,pd.Series):
T_lt = charge_air_cooling_temp.values
else:
print("Error: Expecting a pandas data series as data type")
# Providing reference values for the variables
k = (CONSTANTS["General"]["ISO"]["T_CA"] / T_ca)**1.2 * (CONSTANTS["General"]["ISO"]["T_LT"] / T_lt)
alpha = k - 0.7 * (1 - k) * (1/CONSTANTS["General"]["ISO"]["ETA_MECH"] - 1)
beta = k / alpha
# Final calculation of the BSFC
bsfc = bsfc_ISO * CONSTANTS["General"]["ISO"]["LHV"] / LHV * beta
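# beta rescales the ISO-corrected BSFC to the measured charge-air and coolant temperatures, and the
# LHV ratio converts from the ISO reference fuel to the fuel actually burned.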
return (bsfc, LHV)
def mixtureComposition(mdot_eg,mdot_fuel,temp_fuel,CONSTANTS):
# This function takes as input the flows of exhaust gas and fuel and calculates the resulting composition of the exhaust gas, assuming full combustion
# The composition is written in the code accepted by CoolProp, i.e. in the form:
# "HEOS::COMP_1[%]&COMP_2[%]..."
# Accepted components are: N2, O2, CO2, H2O (SO2?)
# The composition is a dataframe of 4 columns, in the order above
mdot_air = mdot_eg - mdot_fuel
mixture = pd.Series(index=mdot_air.index)
fuel_C_x = pd.Series(0,index=mdot_air.index)
fuel_H_x = pd.Series(0,index=mdot_air.index)
# Reading from the data the mass composition of the fuel, depending on its temperature
fuel_C_x[temp_fuel < 70] = CONSTANTS["General"]["MDO"]["C"] # If T_fuel<70, it is Diesel
fuel_C_x[temp_fuel >= 70] = CONSTANTS["General"]["HFO"]["C"] # If T_fuel>70, it is HFO
fuel_H_x[temp_fuel < 70] = CONSTANTS["General"]["MDO"]["H"] # If T_fuel<70, it is Diesel
fuel_H_x[temp_fuel >= 70] = CONSTANTS["General"]["HFO"]["H"] # If T_fuel>70, it is HFO
# Calculating the elemental molar flows
fuel_C_molfr = mdot_fuel * fuel_C_x / 12
fuel_H_molfr = mdot_fuel * fuel_H_x
air_N2_molfr = mdot_air * 0.77 / 28
air_O2_molfr = mdot_air * 0.23 / 32
output_CO2_molfr = fuel_C_molfr
output_H2O_molfr = fuel_H_molfr / 2
output_N2_molfr = air_N2_molfr
output_O2_molfr = air_O2_molfr - output_CO2_molfr - output_H2O_molfr / 2
# Finally, calculating the compositions
tot_molfr = output_CO2_molfr + output_H2O_molfr + output_N2_molfr + output_O2_molfr
tot_molfr[tot_molfr==0] = 1 # avoid division by zero, which would make the composition fractions NaN
O2 = output_O2_molfr / tot_molfr
H2O = output_H2O_molfr / tot_molfr
CO2 = output_CO2_molfr / tot_molfr
N2 = 1 - O2 - H2O - CO2
for idx in mdot_air.index:
mixture[idx] = "HEOS::" + "N2[" + str(N2[idx]) + "]&" + "O2[" + str(O2[idx]) + "]&" + "H2O[" + str(H2O[idx]) + "]&" + "CO2[" + str(CO2[idx]) + "]"
return mixture
def mixtureCompositionNew(mdot_tot,mdot_fuel,temp_fuel,CONSTANTS):
# This function takes as input the total flow and the fuel flow and calculates the resulting composition of the exhaust gas, assuming full combustion
# The composition is written in the code accepted by CoolProp, i.e. in the form:
# "HEOS::COMP_1[%]&COMP_2[%]..."
# Accepted components are: N2, O2, CO2, H2O (SO2?)
# The composition is a dataframe of 4 columns, in the order above
mdot_air = mdot_tot - mdot_fuel
fuel_C_x =
|
pd.Series(0,index=mdot_air.index)
|
pandas.Series
|
'''
This code will clean the OB datasets and combine all the cleaned data into one
Dataset name: O-17-Mateus Bavaresco
1. two excel files for room 1 and room 2
2. each excel file has multiple sheets in it
3. extract different information from the excel file
4. store data in the templates
'''
import os
import glob
import datetime
import pandas as pd
# specify the path
data_path = "D:/yapan_office_D/Data/Annex-79-OB-Database/2021-05-28-1130-raw-data/Annex 79 Data Collection/O-17-Mateus Bavaresco/_yapan_processing/"
template_path = 'D:/yapan_office_D/Data/Annex-79-OB-Database/OB Database Consolidation/Templates/'
begin_time = datetime.datetime.now()
'''
1. read the two excel files into pandas and clean the data
'''
# read the data from excel all in one
combined_room1 = pd.ExcelFile(data_path + 'Room 01 modified.xlsx')
combined_room2 = pd.ExcelFile(data_path + 'Room 02 modified.xlsx')
# parse the data
sheet_names1 = combined_room1.sheet_names # get the sheet names in the excel file
sheet_names2 = combined_room2.sheet_names # get the sheet names in the excel file
# filter out the desired data and combine them
window1 = list(filter(lambda name: 'Wind-' in name, sheet_names1))
light1 = list(filter(lambda name: 'Light-' in name, sheet_names1))
ac1 = list(filter(lambda name: 'AC-' in name, sheet_names1))
indoor1 = list(filter(lambda name: 'Central' in name, sheet_names1))
outdoor1 = list(filter(lambda name: 'Outdoor' in name, sheet_names1))
window2 = list(filter(lambda name: 'Window' in name, sheet_names2))
light2 = list(filter(lambda name: 'Light-' in name, sheet_names2))
indoor2 = list(filter(lambda name: 'Central' in name, sheet_names2))
outdoor2 = list(filter(lambda name: 'Outdoor' in name, sheet_names2))
''' 2. Data Processing'''
# read templates into pandas
template_window = pd.read_csv(template_path+'Window_Status.csv')
template_light = pd.read_csv(template_path+'Ligthing_Status.csv')
template_hvac = pd.read_csv(template_path+'HVAC_Measurement.csv')
template_indoor = pd.read_csv(template_path+'Indoor_Measurement.csv')
template_outdoor = pd.read_csv(template_path+'Outdoor_Measurement.csv')
''' 2.1 Window_Status.csv '''
# read and combine data by category and add IDs when combining
window_combined = pd.DataFrame()
# combine data from room 1 and assign room ID
for index, name in enumerate(window1):
temp_df = pd.read_excel(combined_room1, sheet_name=name)
temp_df['Window_ID'] = index+1
temp_df['Room_ID'] = 1
window_combined = pd.concat([window_combined, temp_df], ignore_index=True)
# print(index)
# combine data from room 2 and assign room ID
for index, name in enumerate(window2):
temp_df = pd.read_excel(combined_room2, sheet_name=name)
temp_df['Window_ID'] = index+1
temp_df['Room_ID'] = 2
window_combined = pd.concat([window_combined, temp_df], ignore_index=True)
# print(index)
# this column has mixed datetime and string data, convert all to datetime
window_combined.DATE = pd.to_datetime(window_combined['DATE'], infer_datetime_format=True)
# combine date and time columns together
window_combined['Date_Time'] = window_combined['DATE'].astype(str) + ' ' + window_combined['TIME'].astype(str) # add date and time
window_combined['Date_Time'] = pd.to_datetime(window_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime
window_combined = window_combined[['Date_Time', 'STATUS', 'Window_ID', 'Room_ID']] # re-order columns
window_combined.columns = ['Date_Time', 'Window_Status', 'Window_ID', 'Room_ID'] # rename the column names
window_combined = window_combined.replace(['OPEN', 'open', 'CLOSED', 'Closed'], [1, 1, 0, 0], inplace=False) # convert window status to values
window_combined['Window_Status'].unique() # check if all the text has been replaced
# concat the combined data to the template
template_window = pd.concat([template_window, window_combined], ignore_index=True)
# assign data type to each columns
# template_window.dtypes
template_window['Window_Status_ID'] = ''
template_window['Window_Status'] = template_window['Window_Status'].astype(int)
template_window['Window_ID'] = template_window['Window_ID'].astype(int)
template_window['Room_ID'] = template_window['Room_ID'].astype(int)
# sort the dataframe
# cannot sort by three columns by ascending, because of the Date_Time
# template_window.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True)
# check missing values, and sum missing value count by column
print('Check missing values in : window_combined')
print(template_window.isnull().sum())
# save
# save Window_Status.csv
template_window.to_csv(data_path+'Window_Status.csv', index=False)
''' 2.2 Ligthing_Status.csv '''
# read and combine data by category and add IDs when combining
light_combined = pd.DataFrame()
# combine data from room 1 and assign room ID
for index, name in enumerate(light1):
temp_df = pd.read_excel(combined_room1, sheet_name=name)
temp_df['Lighting_Zone_ID'] = index+1
temp_df['Room_ID'] = 1
light_combined = pd.concat([light_combined, temp_df], ignore_index=True)
# print(index)
# combine data from room 2 and assign room ID
for index, name in enumerate(light2):
temp_df = pd.read_excel(combined_room2, sheet_name=name)
temp_df['Lighting_Zone_ID'] = index+1
temp_df['Room_ID'] = 2
light_combined = pd.concat([light_combined, temp_df], ignore_index=True)
# print(index)
# this column has mixed datetime and string data, convert all to datetime
light_combined.DATE = pd.to_datetime(light_combined['DATE'], infer_datetime_format=True)
# combine date and time columns together
light_combined['Date_Time'] = light_combined['DATE'].astype(str) + ' ' + light_combined['TIME'].astype(str) # add date and time
light_combined['Date_Time'] = pd.to_datetime(light_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime
light_combined = light_combined[['Date_Time', 'STATUS', 'Lighting_Zone_ID', 'Room_ID']] # re-order columns
light_combined.columns = ['Date_Time', 'Ligthing_Status', 'Lighting_Zone_ID', 'Room_ID'] # rename the column names
light_combined['Ligthing_Status'].unique() # inspect the raw status values before replacement
light_combined = light_combined.replace(['ON', 'OFF'], [1, 0], inplace=False) # convert window status to values
light_combined['Ligthing_Status'].unique() # check if all the text has been replaced
# concat the combined data to the template
template_light = pd.concat([template_light, light_combined], ignore_index=True)
# assign data type to each columns
# template_light.dtypes
template_light['Lighting_Status_ID'] = ''
template_light['Ligthing_Status'] = template_light['Ligthing_Status'].astype(int)
template_light['Lighting_Zone_ID'] = template_light['Lighting_Zone_ID'].astype(int)
template_light['Room_ID'] = template_light['Room_ID'].astype(int)
# sort the dataframe
# cannot sort by three columns by ascending, because of the Date_Time
# template_light.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True)
# check missing values, and sum missing value count by column
print('Check missing values in : light_combined')
print(template_light.isnull().sum())
# save
# save Window_Status.csv
template_light.to_csv(data_path+'Ligthing_Status.csv', index=False)
''' 2.3 HVAC_Measurement.csv '''
# template_hvac; 'HVAC_Measurement.csv'
# only room 1 has hvac measurement data
# read and combine data by category and add IDs when combining
hvac_combined = pd.DataFrame()
# combine data from room 1 and assign room ID
for index, name in enumerate(ac1):
temp_df = pd.read_excel(combined_room1, sheet_name=name)
temp_df['HVAC_Zone_ID'] = int(name[-1]) # ac 1,2,4; ac3 is missing
temp_df['Room_ID'] = 1
hvac_combined = pd.concat([hvac_combined, temp_df], ignore_index=True)
# print(index)
# this column has mixed datetime and string data, convert all to datetime
hvac_combined.DATE = pd.to_datetime(hvac_combined['DATE'], infer_datetime_format=True)
# combine date and time columns together
hvac_combined['Date_Time'] = hvac_combined['DATE'].astype(str) + ' ' + hvac_combined['TIME'].astype(str) # add date and time
hvac_combined['Date_Time'] = pd.to_datetime(hvac_combined['Date_Time'], format="%Y-%m-%d %H:%M:%S") # convert to datetime
hvac_combined = hvac_combined[['Date_Time', 'STATUS', 'HVAC_Zone_ID', 'Room_ID']] # re-order columns
hvac_combined.columns = ['Date_Time', 'Cooling_Status', 'HVAC_Zone_ID', 'Room_ID'] # rename the column names
hvac_combined['Cooling_Status'].unique() # inspect the raw status values before replacement
hvac_combined = hvac_combined.replace(['ON', 'OFF'], [1, 0], inplace=False) # convert window status to values
hvac_combined['Cooling_Status'].unique() # check if all the text has been replaced
# concat the combined data to the template
template_hvac = pd.concat([template_hvac, hvac_combined], ignore_index=True)
# check missing values, and sum missing value count by column
print('Check missing values in : hvac_combined')
print(template_hvac.isnull().sum())
# no missing values in the combined raw data
# assign data type to each columns
# template_hvac.dtypes
template_hvac = template_hvac.fillna('')
template_hvac['Cooling_Status'] = template_hvac['Cooling_Status'].astype(int)
template_hvac['HVAC_Zone_ID'] = template_hvac['HVAC_Zone_ID'].astype(int)
template_hvac['Room_ID'] = template_hvac['Room_ID'].astype(int)
# sort the dataframe
# cannot sort by three columns by ascending, because of the Date_Time
# template_hvac.sort_values(by=['Date_Time', 'Window_ID', 'Room_ID'], ascending=True)
# check missing values, and sum missing value count by column
print('Check missing values in : hvac_combined')
print(template_hvac.isnull().sum())
# save
# save Window_Status.csv
template_hvac.to_csv(data_path+'HVAC_Measurement.csv', index=False)
''' 2.4 Indoor_Measurement.csv '''
# template_indoor; 'Indoor_Measurement.csv'
# read and combine data by category and add IDs when combining
indoor_combined = pd.DataFrame()
# combine data from room 1 and assign room ID
for index, name in enumerate(indoor1):
temp_df = pd.read_excel(combined_room1, sheet_name=name)
temp_df.columns = ['DATE', 'TIME', 'Indoor_Temp', 'Indoor_RH'] # indoor 1 and indoor 2 have different column names
temp_df['Room_ID'] = 1
indoor_combined = pd.concat([indoor_combined, temp_df], ignore_index=True)
# print(index)
''' Room 1'''
# indoor 1 TIME column has different format of timestamp
# format time
indoor_combined['DATE'] =
|
pd.to_datetime(indoor_combined['DATE'], infer_datetime_format=True)
|
pandas.to_datetime
|
"""
Analysis by Term
==========================================================================
"""
import ipywidgets as widgets
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import display
from nltk.corpus.reader.ieer import documents
import techminer.core.dashboard as dash
from techminer.core import (
DASH,
add_counters_to_axis,
corpus_filter,
explode,
limit_to_exclude,
sort_axis,
sort_by_axis,
)
from techminer.plots import (
bar_plot,
barh_plot,
pie_plot,
stacked_bar,
stacked_barh,
treemap,
wordcloud_,
worldmap,
)
# from techminer.core.dashboard import max_items, min_occurrence
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
def __init__(
self,
data,
limit_to,
exclude,
years_range,
clusters=None,
cluster=None,
):
if years_range is not None:
initial_year, final_year = years_range
data = data[(data.Year >= initial_year) & (data.Year <= final_year)]
#
# Filter for cluster members
#
if clusters is not None and cluster is not None:
data = corpus_filter(data=data, clusters=clusters, cluster=cluster)
self.data = data
self.limit_to = limit_to
self.exclude = exclude
self.column = None
self.top_by = None
self.sort_by = None
self.ascending = None
self.cmap = None
self.height = None
self.width = None
self.view = None
def core_source_titles(self):
x = self.data.copy()
x["Num_Documents"] = 1
x = explode(
x[
[
"Source_title",
"Num_Documents",
"ID",
]
],
"Source_title",
)
m = x.groupby("Source_title", as_index=True).agg(
{
"Num_Documents": np.sum,
}
)
m = m[["Num_Documents"]]
m = m.groupby(["Num_Documents"]).size()
w = [str(round(100 * a / sum(m), 2)) + " %" for a in m]
m = pd.DataFrame(
{"Num Sources": m.tolist(), "%": w, "Documents published": m.index}
)
m = m.sort_values(["Documents published"], ascending=False)
m["Acum Num Sources"] = m["Num Sources"].cumsum()
m["% Acum"] = [
str(round(100 * a / sum(m["Num Sources"]), 2)) + " %"
for a in m["Acum Num Sources"]
]
m["Tot Documents published"] = m["Num Sources"] * m["Documents published"]
m["Num Documents"] = m["Tot Documents published"].cumsum()
m["Tot Documents"] = m["Num Documents"].map(
lambda w: str(round(w / m["Num Documents"].max() * 100, 2)) + " %"
)
bradford1 = int(len(self.data) / 3)
bradford2 = 2 * bradford1
m["Bradford's Group"] = m["Num Documents"].map(
lambda w: 3 if w > bradford2 else (2 if w > bradford1 else 1)
)
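# Bradford's law zones: a source falls in zone 1, 2 or 3 depending on whether its cumulative
# document count lies in the first, second or final third of all documents.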
m = m[
[
"Num Sources",
"%",
"Acum Num Sources",
"% Acum",
"Documents published",
"Tot Documents published",
"Num Documents",
"Tot Documents",
"Bradford's Group",
]
]
m = m.reset_index(drop=True)
return m
def core_authors(self):
x = self.data.copy()
##
## Num_Documents per Author
##
x["Num_Documents"] = 1
x = explode(
x[
[
"Authors",
"Num_Documents",
"ID",
]
],
"Authors",
)
result = x.groupby("Authors", as_index=True).agg(
{
"Num_Documents": np.sum,
}
)
z = result
authors_dict = {
author: num_docs
for author, num_docs in zip(z.index, z.Num_Documents)
if not
|
pd.isna(author)
|
pandas.isna
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().system(' pip install folium')
# In[2]:
# basic imports
import matplotlib.pyplot as plt
import numpy as np
import math as mt
import csv
import pandas as pd
from pandas import plotting
import folium
import warnings # suppress pandas warnings
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# In[3]:
# scikit-learn imports
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# In[4]:
# running the csv files
f8 = "/home/utilisateur/SIMPLON_2019/projet_04_06_2019/valeursfoncieres_2018.csv"
d8 = pd.read_csv(f8,header=0, parse_dates=True)
# In[5]:
f7 = "/home/utilisateur/SIMPLON_2019/projet_04_06_2019/valeursfoncieres_2017.csv"
d7 = pd.read_csv(f7,header=0, parse_dates=True)
# In[6]:
d8.head()
# In[7]:
# adding both files together
d= pd.concat([d7,d8])
# In[8]:
# shows the first five rows
d.head()
# In[9]:
# extracting ZIP codes
l0 = d[d['code_postal']==69000]
# In[10]:
l1=d[d['code_postal']==69001]
# # Extracting data on the Lyon arrondissements, 2018 and 2017
# In[11]:
l2=d[d['code_postal']==69002]
l3=d[d['code_postal']==69003]
l4=d[d['code_postal']==69004]
l5=d[d['code_postal']==69005]
l6=d[d['code_postal']==69006]
l7=d[d['code_postal']==69007]
l8=d[d['code_postal']==69008]
l9=d[d['code_postal']==69009]
l10=d[d['code_postal']==69010]
# In[12]:
# regrouping all info on boroughs in one dataframe
df_17_18 = pd.concat([l1, l2, l3, l4, l5, l6, l7, l9], ignore_index=True)
df_17_18.head()
# # Extracting the relevant variables
# In[13]:
# choosing which variables will be used, they are then added to a dataframe
ly_1718= df_17_18[["date_mutation","valeur_fonciere","adresse_code_voie","code_postal","type_local","surface_reelle_bati","nombre_pieces_principales","latitude","longitude"]]
# In[14]:
# removing empty data
ly_1718_clean= ly_1718.dropna()# clear missing values by columns
# In[15]:
# calculating real estate value per square meter
ly_1718_clean['surface_au_mettre_carre'] = ly_1718_clean['valeur_fonciere']/ly_1718_clean['surface_reelle_bati']
# In[16]:
# shows statistical data of the dataframe
print(ly_1718_clean['valeur_fonciere'].describe(include='all'))
# In[17]:
# extracting only the apartments because that is the data we need
l_appart=ly_1718_clean[ly_1718_clean['type_local']=='Appartement']
# In[18]:
# shows statistical data of specific estate types (small and large spaces)
print(l_appart['type_local'].describe(include='all'))
# In[19]:
# shows statistical data of real estate value
print(l_appart['valeur_fonciere'].describe(include='all'))
# In[20]:
# shows statistical data of floor surface
print(l_appart['surface_reelle_bati'].describe(include='all'))
# In[ ]:
# In[21]:
# representation of real estate value and floor surface
l_appart.plot(x='surface_reelle_bati', y='valeur_fonciere', style='o')
plt.title('surface_reelle_bati vs valeur_fonciere')
plt.xlabel('surface_reelle_bati')
plt.ylabel('valeur_fonciere')
plt.show()
# In[22]:
# shows distribution of the real estate value
plt.figure(figsize=(15,10))
plt.tight_layout()
sns.distplot(l_appart['valeur_fonciere'])
# In[23]:
# removing value (price)
l_appart_neat = l_appart[l_appart["valeur_fonciere"]<2500000]
# In[24]:
# removing value (floor surface)
l_ap_bon = l_appart_neat[l_appart_neat["surface_reelle_bati"]<260]
# In[25]:
# representation of real estate value and floor surface
l_ap_bon.plot(x='surface_reelle_bati', y='valeur_fonciere', style='o')
plt.title('surface_reelle_bati vs valeur_fonciere')
plt.xlabel('surface_reelle_bati')
plt.ylabel('valeur_fonciere')
plt.show()
# In[26]:
# removing value (price)
l_appart_neat = l_appart[l_appart["valeur_fonciere"]<1500000]
# In[27]:
# removing value (floor surface)
l_ap_bon = l_appart_neat[l_appart_neat["surface_reelle_bati"]<250]
# In[28]:
# representation of real estate value and floor surface
l_ap_bon.plot(x='surface_reelle_bati', y='valeur_fonciere', style='o')
plt.title('surface_reelle_bati vs valeur_fonciere')
plt.xlabel('surface_reelle_bati')
plt.ylabel('valeur_fonciere')
plt.show()
# In[29]:
# statistics of floor surface after the changes previously applied
print(l_ap_bon['surface_reelle_bati'].describe(include='all'))
# In[30]:
# statistics of price per square metre after the changes previously applied
print(l_ap_bon['surface_au_mettre_carre'].describe(include='all'))
# In[31]:
# shows number of values
l_ap_bon.shape
# In[33]:
# extraction
x = l_ap_bon.iloc[:, [5]] # surface_reelle_bati (actual built surface)
y = l_ap_bon.iloc[:, [6]] # nombre_pieces_principales (number of main rooms)
z = l_ap_bon.iloc[:, [1]] # valeur_fonciere (property value)
# In[34]:
# creating df with only two variables
dx=
|
pd.DataFrame(l_ap_bon, columns=['surface_reelle_bati','nombre_pieces_principales'])
|
pandas.DataFrame
|
from tutorial.main.stepbystep.stepbysteputils.pgconnector import create_engine_ready
from suricate.data.companies import getsource, gettarget
import pandas as pd
import numpy as np
engine = create_engine_ready()
# filefolder = '~/'
# leftpath = 'source.csv'
# rightpath = 'target.csv'
# df_source = pd.read_csv(filefolder + leftpath, index_col=0, sep='|', encoding='utf-8')
# df_target = pd.read_csv(filefolder + rightpath, index_col=0, sep='|', encoding='utf-8')
df_source_raw = getsource(nrows=500)
df_target_raw = gettarget(nrows=None)
from sklearn.model_selection import train_test_split
def rebuild_ytrue(ix):
y_true_saved = pd.read_sql(sql="SELECT * FROM y_true WHERE y_true.y_true = 1", con=engine).set_index(
['ix_source', 'ix_target'],
drop=True)['y_true']
y = pd.Series(index=ix, data = np.zeros(shape=len(ix)), name='y_true')
ix_common = y_true_saved.index.intersection(ix)
y.loc[ix_common] = y_true_saved.loc[ix_common]
return y
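# rebuild_ytrue returns a 0/1 Series aligned to the candidate-pair index ix: pairs stored as true
# matches in the y_true table get 1, every other candidate pair defaults to 0.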
def prepare_source(df):
"""
Args:
df:
Returns:
pd.DataFrame
"""
df2 = df
return df2
def prepare_target(df):
"""
Args:
df:
Returns:
pd.DataFrame
"""
df2 = df
return df2
df_source = prepare_source(df_source_raw)
df_target = prepare_target(df_target_raw)
assert df_source.columns.equals(df_target.columns)
print(pd.datetime.now(),' | ', 'number of rows on left:{}'.format(df_source.shape[0]))
print(pd.datetime.now(),' | ', 'number of rows on right:{}'.format(df_target.shape[0]))
import pandas as pd
from tutorial.main.stepbystep.stepbysteputils.esconnector import getesconnector
escon = getesconnector()
from suricate.sbstransformers import SbsApplyComparator
from sklearn.pipeline import FeatureUnion
_sbs_score_list = [
('name_fuzzy', SbsApplyComparator(on='name', comparator='simple')),
('street_fuzzy', SbsApplyComparator(on='street', comparator='simple')),
('name_token', SbsApplyComparator(on='name', comparator='token')),
('street_token', SbsApplyComparator(on='street', comparator='token')),
('city_fuzzy', SbsApplyComparator(on='city', comparator='simple')),
('postalcode_fuzzy', SbsApplyComparator(on='postalcode', comparator='simple')),
('postalcode_contains', SbsApplyComparator(on='postalcode', comparator='contains'))
]
scorer_sbs = FeatureUnion(transformer_list=_sbs_score_list)
from suricate.pipeline import PartialClf
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import Normalizer
from sklearn.decomposition import PCA
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import precision_score, recall_score, accuracy_score
pipe = Pipeline(steps=[
('Impute', SimpleImputer(strategy='constant', fill_value=0)),
('Scaler', Normalizer()),
('PCA', PCA(n_components=4)),
('Predictor', GradientBoostingClassifier(n_estimators=2000))
])
pred = PartialClf(classifier=pipe)
left_train, left_test = train_test_split(df_source_raw, train_size=0.5)
Xst_train = escon.fit_transform(X=left_train)
ix_con_train = Xst_train.index
Xsbs_train = escon.getsbs(X=left_train, on_ix=ix_con_train)
scores_further_train = scorer_sbs.fit_transform(X=Xsbs_train)
scores_further_train = pd.DataFrame(data=scores_further_train, index=ix_con_train, columns=[c[0] for c in _sbs_score_list])
scores_further_train = pd.concat([Xst_train[['es_score']], scores_further_train], axis=1, ignore_index=False)
y_true_train = rebuild_ytrue(ix=ix_con_train)
pred.fit(X=scores_further_train, y=y_true_train)
y_pred_train = pred.predict(X=scores_further_train)
print(pd.datetime.now(),' | ', 'Scores on training data')
print(pd.datetime.now(),' | ', 'accuracy: {}'.format(accuracy_score(y_true=y_true_train, y_pred=y_pred_train)))
print(pd.datetime.now(),' | ', 'precision: {}'.format(precision_score(y_true=y_true_train, y_pred=y_pred_train)))
print(pd.datetime.now(),' | ', 'recall: {}'.format(recall_score(y_true=y_true_train, y_pred=y_pred_train)))
Xst_test = escon.transform(X=left_test)
ix_con_test = Xst_test.index
Xsbs_test = escon.getsbs(X=left_test, on_ix=ix_con_test)
scores_further_test = scorer_sbs.transform(X=Xsbs_test)
scores_further_test = pd.DataFrame(data=scores_further_test, index=ix_con_test, columns=[c[0] for c in _sbs_score_list])
scores_further_test = pd.concat([Xst_test[['es_score']], scores_further_test], axis=1, ignore_index=False)
y_true_test = rebuild_ytrue(ix=ix_con_test)
y_pred_test = pred.predict(X=scores_further_test)
print(pd.datetime.now(),' | ', 'Scores on testing data')
print(
|
pd.datetime.now()
|
pandas.datetime.now
|
import anemoi as an
import pandas as pd
import numpy as np
import scipy as sp
import statsmodels.api as sm
import scipy.odr.odrpack as odrpack
import warnings
def compare_sorted_df_columns(cols_1, cols_2):
return sorted(cols_1) == sorted(cols_2)
def valid_ws_correlation_data(data, ref_ws_col='ref', site_ws_col='site'):
'''Perform checks on wind speed correlation data.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
if ref_ws_col == site_ws_col:
raise ValueError("Error: Reference and site wind speed columns cannot have the same name.")
return False
if not compare_sorted_df_columns(data.columns.tolist(), [ref_ws_col, site_ws_col]):
raise ValueError("Error: the correlation data don't match the expected format.")
return False
if not data.shape[0] > 6:
warnings.warn("Warning: trying to correalate between less than six points.")
return False
if (data.loc[:,ref_ws_col] == data.loc[:,site_ws_col]).sum() == data.shape[0]:
warnings.warn("Warning: it seems you are trying to correalate a single mast against itself.")
return False
return True
def return_correlation_results_frame(ref_label='ref', site_label='site'):
results = pd.DataFrame(columns=['slope', 'offset' , 'R2', 'uncert', 'points'],
index=pd.MultiIndex.from_tuples([(ref_label, site_label)],
names=['ref', 'site'])
)
return results
def return_correlation_data_from_masts(ref_mast, site_mast):
'''Return a DataFrame of reference and site data for correlations.
Will be extracted from each MetMast object using the primary anemometers and wind vanes.
:Parameters:
ref_mast: MetMast
Anemoi MetMast object
site_mast: MetMast
Anemoi MetMast object
:Returns:
out: DataFrame with columns ref, site, and dir
'''
ref_data = ref_mast.return_primary_ano_vane_data()
ref_data.columns = ['ref', 'dir']
site_data = site_mast.return_primary_ano_vane_data()
site_data.columns = ['site', 'site_dir']
data = pd.concat([ref_data, site_data.site], axis=1).dropna()
data = data.loc[:, ['ref', 'site', 'dir']]
if not valid_ws_correlation_data(data=data, ref_ws_col='ref', site_ws_col='site'):
warning_string = "Warning: {} and {} don't seem to have valid concurrent data for a correlation.".format(ref_mast.name, site_mast.name)
warnings.warn(warning_string)
return data
### CORRELATION METHODS ###
def calculate_R2(data, ref_ws_col='ref', site_ws_col='site'):
'''Return a single R2 between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
r2 = data[ref_ws_col].corr(data[site_ws_col])**2
return r2
def calculate_IEC_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
'''Calculate the IEC correlation uncertainty between two wind speed columns
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
uncert = np.std(Y/X)*100/len(X)
return uncert*100.0
def calculate_EDF_uncertainty(data, ref_ws_col='ref', site_ws_col='site'):
    '''Calculate the EDF estimated correlation uncertainty between two wind speed columns.
    Assumes a correlation forced through the origin.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref_ws_col and site_ws_col
ref_ws_col: string, default 'ref'
Reference anemometer data column to use.
site_ws_col: string, default 'site'
Site anemometer data column to use.
'''
data = data.loc[:,[ref_ws_col, site_ws_col]].dropna()
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return np.nan
X = data.loc[:,ref_ws_col].values
Y = data.loc[:,site_ws_col].values
Sxx = np.sum(X**2)
Syy = np.sum(Y**2)
Sxy = np.sum(X*Y)
B = 0.5*(Sxx - Syy)/Sxy
SU = -B + np.sqrt(B**2 + 1)
e2 = np.sum((Y - SU*X)**2)/(1 + SU**2)
Xsi2 = e2/(data.shape[0] - 1)
uncert = np.sqrt((Xsi2*SU**2)*(Sxx*Sxy**2 + 0.25*((Sxx - Syy)**2)*Sxx)/((B**2 + 1.0)*Sxy**4))
return uncert*100.0
def ws_correlation_least_squares_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using ordinary least squares regression.
https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.lstsq.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
data = data.loc[:, [ref_ws_col, site_ws_col]].dropna()
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
if force_through_origin:
data.loc[:,'offset'] = 0
else:
data.loc[:,'offset'] = 1
X = data.loc[:, [ref_ws_col,'offset']].values
Y = data.loc[:, site_ws_col].values
slope, offset = np.linalg.lstsq(X, Y)[0]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def f_with_offset(B, x):
return B[0]*x + B[1]
def f_without_offset(B, x):
return B[0]*x
def ws_correlation_orthoginal_distance_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
    '''Calculate the slope and offset between two wind speed columns using orthogonal distance regression.
https://docs.scipy.org/doc/scipy-0.18.1/reference/odr.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
data_mean = data.mean()
slope_estimate_via_ratio = data_mean[site_ws_col]/data_mean[ref_ws_col]
realdata = odrpack.RealData(X, Y)
if force_through_origin:
linear = odrpack.Model(f_without_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio])
slope = odr.run().beta[0]
offset = 0
else:
linear = odrpack.Model(f_with_offset)
odr = odrpack.ODR(realdata, linear, beta0=[slope_estimate_via_ratio, 0.0])
        odr_output = odr.run()
        slope, offset = odr_output.beta[0], odr_output.beta[1]
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def ws_correlation_robust_linear_model(data, ref_ws_col='ref', site_ws_col='site', force_through_origin=False):
'''Calculate the slope and offset between two wind speed columns using robust linear model.
http://www.statsmodels.org/dev/rlm.html
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
points = data.shape[0]
R2 = calculate_R2(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
uncert = calculate_IEC_uncertainty(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
X = data.loc[:, ref_ws_col].values
Y = data.loc[:, site_ws_col].values
if not force_through_origin:
X = sm.add_constant(X)
else:
X = [np.zeros(X.shape[0]), X]
X = np.column_stack(X)
mod = sm.RLM(Y, X)
resrlm = mod.fit()
offset, slope = resrlm.params
R2 = sm.WLS(mod.endog, mod.exog, weights=mod.fit().weights).fit().rsquared
results.loc[pd.IndexSlice[ref_ws_col, site_ws_col],['slope', 'offset' , 'R2', 'uncert', 'points']] = np.array([slope, offset, R2, uncert, points])
return results
def ws_correlation_method(data, ref_ws_col='ref', site_ws_col='site', method='ODR', force_through_origin=False):
'''Calculate the slope and offset, for a given correlation method, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
        * Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
if method == 'ODR':
results = ws_correlation_orthoginal_distance_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'OLS':
results = ws_correlation_least_squares_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
elif method == 'RLM':
results = ws_correlation_robust_linear_model(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, force_through_origin=force_through_origin)
return results
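# Hedged usage sketch: dispatching all three regression back-ends on the same
# synthetic data. The slope/offset used to build the toy frame are illustrative only.
import numpy as np
import pandas as pd
_rng_demo = np.random.default_rng(1)
_demo = pd.DataFrame({'ref': _rng_demo.uniform(3.0, 15.0, size=500)})
_demo['site'] = 1.05 * _demo['ref'] - 0.2 + _rng_demo.normal(0.0, 0.4, size=500)
for _method in ('OLS', 'RLM', 'ODR'):
    _res = ws_correlation_method(_demo, ref_ws_col='ref', site_ws_col='site', method=_method)
    print(_method)
    print(_res.loc[:, ['slope', 'offset', 'R2']])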
def ws_correlation_binned_by_direction(data, ref_ws_col='ref', site_ws_col='site', ref_dir_col='dir', dir_sectors=16, method='ODR', force_through_origin=False):
'''Calculate the slope and offset, binned by direction, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
ref_dir_col: string, default None (primary wind vane assumed)
Reference wind vane data to use. Extracted from MetMast.data
dir_sectors: int, default 16
Number of equally spaced direction sectors
method: string, default 'ODR'
Correlation method to use.
        * Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:,[ref_ws_col, site_ws_col, ref_dir_col]].dropna().astype(float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
dir_bins = np.arange(1,dir_sectors+1)
results = pd.concat([results]*dir_sectors, axis=0)
results.index = pd.Index(dir_bins, name='dir_bin')
data['dir_bin'] = an.analysis.wind_rose.append_dir_bin(data[ref_dir_col], dir_sectors=dir_sectors)
for dir_bin in dir_bins:
dir_bin_data = data.loc[data['dir_bin']==dir_bin, [ref_ws_col, site_ws_col]]
points = dir_bin_data.shape[0]
if not valid_ws_correlation_data(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
results.loc[dir_bin, 'points'] = points
else:
uncert = calculate_IEC_uncertainty(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col)
dir_bin_results = ws_correlation_method(data=dir_bin_data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col, method=method, force_through_origin=force_through_origin)
results.loc[dir_bin, ['slope', 'offset', 'R2' , 'uncert', 'points']] = dir_bin_results.values
return results
def ws_correlation_binned_by_month(data, ref_ws_col='ref', site_ws_col='site', method='ODR', force_through_origin=False):
'''Calculate the slope and offset, binned by month, between two wind speed columns.
:Parameters:
data: DataFrame
DataFrame with wind speed columns ref and site, and direction data dir
ref_ws_col: string, default None (primary anemometer assumed)
Reference anemometer data to use. Extracted from MetMast.data
site_ws_col: string, default None (primary anemometer assumed)
Site anemometer data to use. Extracted from MetMast.data
method: string, default 'ODR'
Correlation method to use.
        * Orthogonal distance regression: 'ODR'
* Ordinary least squares: 'OLS'
* Robust linear models: 'RLM'
force_through_origin: boolean, default False
Force the correlation through the origin (offset equal to zero)
:Returns:
out: DataFrame
slope, offset, R2, uncert, points
'''
    data = data.loc[:, [ref_ws_col, site_ws_col]].dropna().astype(float)
results = return_correlation_results_frame(ref_label=ref_ws_col, site_label=site_ws_col)
if not valid_ws_correlation_data(data=data, ref_ws_col=ref_ws_col, site_ws_col=site_ws_col):
return results
months = np.arange(1,13)
results = pd.concat([results]*12, axis=0)
results.index =
|
pd.Index(months, name='month')
|
pandas.Index
|
import rqdatac as rd
from keras.models import Sequential
from keras.layers import Dense
from sklearn.model_selection import train_test_split
import numpy as np
import keras
import matplotlib.pyplot as plt
import pandas as pd
rd.init()
spot_input_data = rd.get_price('510050.XSHG', start_date='2015-06-01', end_date='2019-01-23', frequency='10m')
future_data = rd.get_price('IH88', start_date='2015-06-01', end_date='2019-01-23', frequency='10m')
future_data.drop(columns=['trading_date', 'limit_up', 'limit_down'], inplace=True)
spot_output_data = rd.get_price('510050.XSHG', start_date='2015-06-02', end_date='2019-01-24', frequency='1d',
fields=['open', 'close'])
spot_output_data = spot_output_data['close'] > spot_output_data['open']
def combine_spot_future(spot, future, direction):
spot1 = spot.copy()
spot1.columns = list(map(lambda x: 'spot_' + x, spot1.columns))
future1 = future.copy()
future1.columns = list(map(lambda x: 'future_' + x, future1.columns))
direction1 =
|
pd.DataFrame(direction)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
import itertools
from .statistics import r
def __get_X_Y_L__(X, Y=None):
if type(X) is not pd.DataFrame:
X = pd.DataFrame(X)
if Y is None:
Y = X.iloc[:,0]
X = X.iloc[:,1:]
else:
if type(Y) is not pd.DataFrame:
Y = pd.DataFrame(Y)
L = X.shape[1]
return X, Y, L
def __get_r0_R__(r0=None, R=None, X=None, Y=None):
# Only one of the pairs should be provided: (r0, R) or (X, Y).
if r0 is not None or R is not None:
assert r0 is not None and R is not None
if type(r0) is not pd.DataFrame:
r0 = pd.DataFrame(r0)
if type(R) is not pd.DataFrame:
            R = pd.DataFrame(R)
else:
assert X is not None
X, Y, _ = __get_X_Y_L__(X, Y)
if r0 is None:
r0 = hellwig_r0(X, Y)
if R is None:
R = hellwig_R(X, Y)
return r0, R
def __get__J_cols__(columns, J_idx=None, J_cols=None, J_mask=None):
# Only one of J_idx, J_cols, and J_mask should be provided.
assert sum([J is not None for J in [J_idx, J_cols, J_mask]]) == 1
if J_mask is not None:
J_idx = np.where(J_mask)
if J_idx is not None:
if type(J_idx) is not pd.Index:
J_idx = pd.Index(J_idx)
J_cols = columns[tuple(J_idx,)]
if type(J_cols) is not pd.Index:
J_cols = pd.Index(J_cols)
return J_cols
def hellwig_r0(X, Y=None):
X, Y, L = __get_X_Y_L__(X, Y)
r0 = np.zeros(L)
for j in range(L):
r0[j] = r(Y, X.iloc[:,j])
r0 =
|
pd.DataFrame(r0)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from config_fh import get_db_engine, get_db_session, get_cache_file_path, STR_FORMAT_DATE
from fh_tools.fh_utils import return_risk_analysis, str_2_date
from fh_tools import fh_utils
import matplotlib.pyplot as plt  # PyCharm needs an explicit call to plt.show to display the plot
from datetime import date, datetime, timedelta
from sqlalchemy.types import String, Date, FLOAT
import datetime as dt
import logging
logger = logging.getLogger()
STRATEGY_TYPE_CN_EN_DIC = {'债券策略': 'fixed_income',
'套利策略': 'arbitrage',
'管理期货策略': 'cta',
'股票多头策略': 'long_only',
'阿尔法策略': 'alpha',
'宏观策略': 'macro',
'组合基金策略': 'fof'}
STRATEGY_TYPE_EN_CN_DIC = {en: cn for cn, en in STRATEGY_TYPE_CN_EN_DIC.items()}
def calc_wind_code_list_index(wind_code_list, date_since, file_name=None):
"""
    Build the index composed of the funds in wind_code_list
    :param wind_code_list:
    :param date_since:
    :param file_name: defaults to None, in which case no file is written
    :return: daily return series of the composite index
"""
    # fetch NAV data for the sample sub-funds
wind_code_list_str = ', '.join(["'" + wind_code + "'" for wind_code in wind_code_list])
query_base_str = r'''select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (%s)
group by wind_code, nav_date_week
) as ffv,
fund_nav fv
where ffv.nav_date_week >= %s
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc'''
query_str = query_base_str % (wind_code_list_str, date_since)
# logger.info(query_str)
engine = get_db_engine()
fund_nav_df = pd.read_sql_query(query_str, engine)
    # fetch the names of the sample sub-funds
sql_str = """select wind_code, sec_name
from fund_info
where wind_code in (%s)"""
query_str = sql_str % wind_code_list_str
with get_db_session(engine) as session:
table = session.execute(query_str)
fund_code_name_dic = dict(table.fetchall())
# logger.info(df_fund_nav)
df_fund = fund_nav_df.pivot(index='nav_date_week', columns='wind_code', values='nav_acc')
df_fund.rename(columns=fund_code_name_dic, inplace=True)
# df_fund.to_csv('%s-%s【%d】 %s_%s.csv' % (strategy_name, sample_name, len(wind_code_list), date_from, date_to))
df_fund.interpolate(inplace=True)
df_fund.dropna(inplace=True)
wind_code_list = list(df_fund.columns)
wind_code_count = len(wind_code_list)
if wind_code_count == 0:
logger.info('wind_code_list_str has no data')
# df_fund.to_csv('%s_df_fund.csv' % sample_name)
weight = 1 / wind_code_count
# logger.info(df_fund)
fund_pct_df = df_fund.pct_change().fillna(0)
if file_name is not None:
file_path = get_cache_file_path(file_name)
fund_index_df = (1 + fund_pct_df).cumprod()
fund_index_df.to_csv(file_path)
fund_pct_df *= weight
# logger.info(df_fund_pct)
nav_index_pct_s = None
for wind_code in wind_code_list:
if nav_index_pct_s is None:
nav_index_pct_s = fund_pct_df[wind_code]
else:
nav_index_pct_s += fund_pct_df[wind_code]
# logger.info("df_nav_index_pct_s[%s]:\n" % wind_code, df_nav_index_pct_s)
date_list = list(fund_pct_df.index)
if len(date_list) == 0:
file_path = get_cache_file_path('df_fund_%s_%s.csv' % (file_name, date_since))
        logger.info('The intersection of sub-fund NAV dates is empty; see %s for the underlying data', file_path)
df_fund.to_csv(file_path)
logger.info('between: %s ~ %s', min(date_list), max(date_list))
return nav_index_pct_s
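# Hedged sketch (toy data, not the production database path): the equal-weight
# composite built above boils down to averaging the sub-funds' weekly pct changes.
import pandas as pd
_nav_demo = pd.DataFrame({'fund_a': [1.00, 1.02, 1.01], 'fund_b': [1.00, 0.99, 1.03]})
_pct_demo = _nav_demo.pct_change().fillna(0)
_index_pct_demo = _pct_demo.mean(axis=1)              # same result as summing weight * pct per fund
_index_level_demo = (1 + _index_pct_demo).cumprod()   # composite index level
print(_index_level_demo)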
def calc_strategy_index(strategy_name, date_from, date_to, calc_sample_name=None, create_sub_index_csv=False):
"""
    Build a strategy index.
    Given a strategy name, pull the sample fund codes for that strategy and combine them into an equal-weighted index.
    :param strategy_name: strategy name
    :param date_from: start date
    :param date_to: end date
    :param calc_sample_name: the sample_name to compute; 'main' is the main index, None computes all sample indices.
    :return: DataFrame with the main index and the other sample indices
"""
# logger.info('strategy %s between: %s %s', strategy_name, date_from, date_to)
with get_db_session() as session:
        # fetch the list of nav_date values
stg_table = session.execute(
'SELECT nav_date_week, wind_code_str, sample_name FROM strategy_index_info where strategy_name=:stg_name order by nav_date_week desc',
{'stg_name': strategy_name})
date_last = None
index_pct_s = None
sample_name_list = []
sample_val_list = []
stg_table_data_list = []
for stg_info in stg_table.fetchall():
# date_since = stg_info[0]
# wind_code_str = stg_info[1]
sample_name = stg_info[2]
# logger.info('stg_info %s', stg_info)
if calc_sample_name is not None and sample_name != calc_sample_name:
continue
stg_table_data_list.append(
{'nav_date_week': stg_info[0], 'wind_code_str': stg_info[1], 'sample_name': sample_name})
stg_table_df = pd.DataFrame(stg_table_data_list)
logger.debug('stg_table_df.shape %s', stg_table_df.shape)
stg_table_df_gp = stg_table_df.groupby('sample_name')
stg_table_df_gp_dic = stg_table_df_gp.groups
for sample_name, row_num_list in stg_table_df_gp_dic.items():
index_pct_s = None
date_last = None
for row_num in row_num_list:
wind_code_str = stg_table_df.iloc[row_num]['wind_code_str']
date_since = stg_table_df.iloc[row_num]['nav_date_week']
wind_code_list = wind_code_str.split(sep=',')
if create_sub_index_csv:
file_name = '%s_%s_since_%s.csv' % (strategy_name, sample_name, date_since)
else:
file_name = None
nav_index_pct_s = calc_wind_code_list_index(wind_code_list, date_since, file_name)
logger.debug('%s\n%s', sample_name, nav_index_pct_s)
if date_last is None:
date_available = [d for d in nav_index_pct_s.index if date_from <= d <= date_to and date_since <= d]
else:
date_available = [d for d in nav_index_pct_s.index if
date_from <= d <= date_to and date_since <= d < date_last]
date_last = date_since
if index_pct_s is None:
                index_pct_s = nav_index_pct_s.loc[date_available]
            else:
                index_pct_s = pd.concat([index_pct_s, nav_index_pct_s.loc[date_available]])
# logger.info(sample_name, '\n', index_pct_s)
sample_val_s = (1 + index_pct_s).cumprod()
sample_name_list.append(sample_name)
sample_val_list.append(sample_val_s)
# sample_val_s.to_csv('%s %s_%s.csv' % (strategy_name, date_from, date_to))
if len(sample_val_list) == 0:
index_df = None
else:
index_df = pd.DataFrame(sample_val_list, index=sample_name_list).T
index_df.rename(columns={'main': strategy_name}, inplace=True)
index_df.interpolate(inplace=True)
return index_df
def update_strategy_index(date_from_str, date_to_str):
"""
    For every strategy_name in strategy_index_info, update the index NAV values into the strategy_index_val table
    :param date_from_str: start date %Y-%m-%d
    :param date_to_str: end date %Y-%m-%d
:return:
"""
engine = get_db_engine()
with get_db_session(engine) as session:
stg_table = session.execute('select strategy_name from strategy_index_info group by strategy_name')
strategy_name_list = [stg_info[0] for stg_info in stg_table.fetchall()]
strategy_name_count = len(strategy_name_list)
if strategy_name_count == 0:
logger.info('strategy_index_info table is empty')
return
# strategy_name_list = ['long_only', 'cta', 'arbitrage', 'alpha', 'macro']
date_from = datetime.strptime(date_from_str, '%Y-%m-%d').date()
date_to = datetime.strptime(date_to_str, '%Y-%m-%d').date()
index_df_list = []
for strategy_name in strategy_name_list:
# strategy_name = 'long_only'
# index_df = calc_strategy_index(strategy_name, date_from, date_to, calc_sample_name='main')
stg_index_s = get_strategy_index_by_name(strategy_name, date_from, date_to, statistic=False)
if stg_index_s is not None:
            logger.info('Generated %s strategy index [%s ~ %s]', strategy_name, stg_index_s.index[0], stg_index_s.index[-1])
# index_df.to_csv('%s_sample_%s_%s.csv' % (strategy_name, date_from, date_to))
index_df = pd.DataFrame({'value': stg_index_s})
index_df.index.rename('nav_date', inplace=True)
index_df.reset_index(inplace=True)
# index_df.rename(columns={'nav_date_week': 'nav_date', strategy_name: 'value'}, inplace=True)
index_df['index_name'] = strategy_name
index_df_list.append(index_df)
else:
            logger.info('No data to show for %s', strategy_name)
index_df_all = pd.concat(index_df_list)
index_df_all.set_index(['index_name', 'nav_date'], inplace=True)
    # reset the table contents
table_name = 'strategy_index_val'
with get_db_session(engine) as session:
# session.execute("delete from %s where nav_date between '%s' and '%s'" % (table_name, date_from_str, date_to_str))
session.execute("truncate table %s" % table_name)
index_df_all.to_sql(table_name, engine, if_exists='append',
dtype={
'index_name': String(20),
'nav_date': Date,
'value': FLOAT,
}
)
def stat_fund_by_stg(strategy_type, date_from, date_to):
"""
    Summarize how funds of the given strategy performed over the specified date range, including sample size, win rate, and the share above 1% / below -1%
:param strategy_type:
:param date_from:
:param date_to:
:return:
"""
sql_str = """select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (select wind_code from fund_info where strategy_type = '%s')
group by wind_code, nav_date_week
having nav_date_week between '%s' and '%s'
) as ffv,
fund_nav fv
where ffv.nav_date_week between '%s' and '%s'
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc"""
query_str = sql_str % (strategy_type, date_from, date_to, date_from, date_to)
engine = get_db_engine()
fund_nav_df = pd.read_sql_query(query_str, engine)
df_fund = fund_nav_df.pivot(index='nav_date_week', columns='wind_code', values='nav_acc')
    # fetch the names of the sample sub-funds
sql_str = """select wind_code, sec_name
from fund_info
where wind_code in (%s)"""
wind_code_list_str = ', '.join(["'" + wind_code + "'" for wind_code in list(df_fund.columns)])
query_str = sql_str % wind_code_list_str
with get_db_session(engine) as session:
table = session.execute(query_str)
fund_code_name_dic = dict(table.fetchall())
# logger.info(df_fund_nav)
df_fund.rename(columns=fund_code_name_dic, inplace=True)
# df_fund.to_csv('%s-%s【%d】 %s_%s.csv' % (strategy_name, sample_name, len(wind_code_list), date_from, date_to))
df_fund.interpolate(inplace=True)
df_fund.dropna(axis=1, inplace=True)
wind_code_list = list(df_fund.columns)
wind_code_count = len(wind_code_list)
if wind_code_count == 0:
logger.info('wind_code_list_str has no data')
weight = 1 / wind_code_count
# logger.info(df_fund)
fund_pct_df = df_fund.pct_change().fillna(0)
fund_comprod_df = (1 + fund_pct_df).cumprod()
fund_comprod_df = fund_comprod_df[fund_comprod_df.columns[fund_comprod_df.max() != fund_comprod_df.min()]]
date_list = list(fund_pct_df.index)
win_count = (fund_comprod_df.iloc[-1] > 1).sum()
win_1_count = (fund_comprod_df.iloc[-1] > 1.01).sum()
loss_1_count = (fund_comprod_df.iloc[-1] < 0.99).sum()
    logger.info('%s statistics period: %s ~ %s', strategy_type, min(date_list), max(date_list))
    logger.info('Among the %d funds with complete published NAV data:', wind_code_count)
    logger.info('%d funds had a positive return, i.e. %3.1f%%', win_count, win_count / wind_code_count * 100)
    logger.info('%d funds returned more than 1%%, i.e. %3.1f%%', win_1_count, win_1_count / wind_code_count * 100)
    logger.info('%d funds lost more than 1%%, i.e. %3.1f%%', loss_1_count, loss_1_count / wind_code_count * 100)
fund_index_df = fund_comprod_df.mean(axis=1)
fund_comprod_df[strategy_type] = fund_index_df
file_path = get_cache_file_path('%s %s.csv' % (strategy_type, date_to))
fund_comprod_df.to_csv(file_path)
file_path = get_cache_file_path('%s index %s.csv' % (strategy_type, date_to))
fund_index_df.to_csv(file_path)
def filter_wind_code(fund_nav_df, strategy_type_en):
"""债券策略指数中存在很多不符合标准的基金,因此需要以 strategy_index_info 中保持的列表为基准"""
query_str = "select wind_code_str from strategy_index_info where strategy_name = :strategy_type"
with get_db_session() as session:
row_data = session.execute(query_str, {'strategy_type': strategy_type_en}).fetchone()
if row_data is not None and len(row_data) > 0:
wind_code_str = row_data[0]
if wind_code_str is not None and len(wind_code_str) > 0:
wind_code_list = wind_code_str.split(',')
wind_code_list = list(set(list(fund_nav_df.columns)) & set(wind_code_list))
fund_nav_df = fund_nav_df[wind_code_list]
return fund_nav_df
def get_fund_nav_weekly_by_strategy(strategy_type_en, date_from, date_to,
show_fund_name=False, do_filter_wind_code=False):
"""
    Given a strategy code and a start/end date, return the weekly NAV of every fund under that strategy
:param strategy_type_en:
:param date_from:
:param date_to:
:param show_fund_name:
:return:
"""
global STRATEGY_TYPE_EN_CN_DIC
sql_str = """select fv.wind_code, nav_date_week, fv.nav_acc
from (
select wind_code, adddate(nav_date, 4 - weekday(nav_date)) as nav_date_week, max(nav_date) as nav_date_max
from fund_nav
where wind_code in (select wind_code from fund_info where strategy_type = '%s')
group by wind_code, nav_date_week
having nav_date_week between '%s' and '%s'
) as ffv,
fund_nav fv
where ffv.nav_date_week between '%s' and '%s'
and fv.wind_code = ffv.wind_code
and fv.nav_date = ffv.nav_date_max
group by wind_code, nav_date_week
order by nav_date_week desc"""
strategy_name_cn = STRATEGY_TYPE_EN_CN_DIC[strategy_type_en]
query_str = sql_str % (strategy_name_cn, date_from, date_to, date_from, date_to)
    # logger.debug('SQL used to query sub-fund NAV for the strategy:\n%s', query_str)
engine = get_db_engine()
data_df =
|
pd.read_sql_query(query_str, engine)
|
pandas.read_sql_query
|
import functools
import itertools
import numpy as np
import pandas as pd
import pytest
from sid.events import calculate_infections_by_events
def event_infect_n(states, params, seed, i): # noqa: U100
s =
|
pd.Series(index=states.index, data=False)
|
pandas.Series
|
#!/usr/bin/env python3
#
# Description
#
# Libraries ------------------------------------------------------------------------------------------------------------
import logging
import sys
import pandas as pd
from tensorflow.io import gfile
from textblob import TextBlob
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import MinMaxScaler
import os
from pathlib import Path
# Helpers --------------------------------------------------------------------------------------------------------------
def load_data(input_data, mode):
    logging.info(f'Loading data from {input_data}...')
if mode == 'cloud':
with gfile.GFile(name=input_data, mode='r') as file:
df = pd.read_csv(file)
else:
df = pd.read_csv(input_data)
logging.info(f'{input_data} successfully loaded!')
return df
def get_count_words(s):
return len(str(s).split(" "))
def get_count_char(s):
return sum(len(w) for w in str(s).split(" "))
def get_count_sents(s):
return len(str(s).split("."))
def get_count_exc_marks(s):
return s.count('!')
def get_count_question_marks(s):
return s.count('?')
def get_count_pct(s):
return len([w for w in s if w in '"#$%&\'()*+,-./:;<=>@[\\]^_`{|}~'])
def get_count_cap(s):
return sum(1 for w in s if w.isupper())
def get_polarity(s):
tb = TextBlob(s)
return tb.sentiment.polarity
def get_subjectivity(s):
tb = TextBlob(s)
return tb.sentiment.subjectivity
def get_text_features(df, text_var):
df_copy = df.copy()
# word count
logging.info('Get count words feature...')
df_copy['word_count'] = df_copy[text_var].apply(get_count_words)
# character count
logging.info('Get count characters feature...')
df_copy['char_count'] = df_copy[text_var].apply(get_count_char)
# sentence count
logging.info('Get count sentences feature...')
df_copy['sentence_count'] = df_copy[text_var].apply(get_count_sents)
# count capitals
logging.info('Get capitals words feature...')
df_copy['capitals_count'] = df_copy[text_var].apply(get_count_cap)
# count puncts
logging.info('Get count punctuation features...')
df_copy['punc_count'] = df_copy[text_var].apply(get_count_pct)
df_copy['exc_marks_count'] = df_copy[text_var].apply(get_count_exc_marks)
df_copy['question_marks_count'] = df_copy[text_var].apply(get_count_question_marks)
# avg word len
logging.info('Get word density feature...')
df_copy['avg_word_len'] = df_copy['char_count'] / df_copy['word_count']
# avg sentence len
logging.info('Get sentence density feature...')
df_copy['avg_sentence_len'] = df_copy['word_count'] / df_copy['sentence_count']
# avg cap
logging.info('Get capitals density feature...')
df_copy['avg_cap_len'] = df_copy.apply(lambda row: float(row['capitals_count']) / float(row['word_count']),
axis=1)
return df_copy
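# Illustrative sketch: running the hand-crafted text features on a tiny frame.
# The column name 'review_text' and the sample strings are assumptions made for this example only.
import pandas as pd
_sample_docs = pd.DataFrame({'review_text': ['Great product!!', 'Terrible. Would NOT buy again?']})
_sample_feats = get_text_features(_sample_docs, text_var='review_text')
print(_sample_feats[['word_count', 'char_count', 'sentence_count', 'exc_marks_count', 'capitals_count']])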
def get_nlp_features(df, text_var):
df_copy = df.copy()
# polarity
logging.info('Get polarity feature...')
df_copy['polarity'] = df_copy[text_var].apply(get_polarity)
# subjectivity
logging.info('Get subjectivity feature...')
df_copy['subjectivity'] = df_copy[text_var].apply(get_subjectivity)
return df_copy
def fit_tf_idf(data, text_var, params=None):
    logging.info('Train TfidfVectorizer...')
try:
if params:
tf_idf_vectorizer = TfidfVectorizer(**params)
else:
tf_idf_vectorizer = TfidfVectorizer()
tf_idf_vectorizer = tf_idf_vectorizer.fit(data[text_var])
except RuntimeError as error:
logging.error(error)
sys.exit(1)
else:
        logging.info('TfidfVectorizer successfully trained!')
return tf_idf_vectorizer
def fit_min_max_scaler(data, params=None):
logging.info('Train MinMaxScaler...')
try:
if params:
scaler = MinMaxScaler(**params)
else:
scaler = MinMaxScaler()
scaler = scaler.fit(data)
except RuntimeError as error:
logging.error(error)
sys.exit(1)
else:
logging.info('MinMaxScaler successfully trained!')
return scaler
def get_tfidf_df(df, text_cols, tfidf_matrix, cols):
df_copy = df.copy()
logging.info('Get Tf-Idf dataframe...')
df_copy = df_copy.drop(text_cols, axis=1)
tfidf_plain = tfidf_matrix.toarray()
tfidf = pd.DataFrame(tfidf_plain, columns=cols)
tfidf_df = pd.merge(df_copy, tfidf, how="left", left_index=True, right_index=True)
logging.info('Tf-Idf successfully created!')
return tfidf_df
def get_scaled_df(matrix, df):
scaled_df = pd.DataFrame(matrix, columns=df.columns)
return scaled_df
def save_data(x_df, y_df, path, out_data, mode, bucket):
df =
|
pd.merge(x_df, y_df, how="left", left_index=True, right_index=True)
|
pandas.merge
|
'''
Description:
A technique for detecting anomalies in seasonal univariate time
series where the input is a series of <timestamp, count> pairs.
Usage:
anomaly_detect_ts(x, granularity="day", max_anoms=0.1, direction="pos", alpha=0.05, only_last=None,
threshold="None", e_value=False, longterm=False, piecewise_median_period_weeks=2,
verbose=False)
Arguments:
x: Time series as a two column data frame where the first column
consists of the timestamps and the second column consists of
the observations.
granularity: Granularity for prediction. "day" "hr" or "min"
max_anoms: Maximum number of anomalies that S-H-ESD will detect as a
percentage of the data.
direction: Directionality of the anomalies to be detected. Options are:
"pos" | "neg" | "both".
alpha: The level of statistical significance with which to accept or
reject anomalies.
only_last: Find and report anomalies only within the last day or hr in
the time series. None | "day" | "hr".
threshold: Only report positive going anoms above the threshold
specified. Options are: None | "med_max" | "p95" |
"p99".
e_value: Add an additional column to the anoms output containing the
expected value.
longterm: Increase anom detection efficacy for time series that are
greater than a month. See Details below.
piecewise_median_period_weeks: The piecewise median time window as
described in Vallis, Hochenbaum, and Kejariwal (2014).
Defaults to 2.
verbose: Enable debug messages
Details:
"longterm" This option should be set when the input time series
is longer than a month. The option enables the approach described
in Vallis, Hochenbaum, and Kejariwal (2014).
"threshold" Filter all negative anomalies and those anomalies
whose magnitude is smaller than one of the specified thresholds
which include: the median of the daily max values (med_max), the
95th percentile of the daily max values (p95), and the 99th
percentile of the daily max values (p99).
Value:
The returned value is a list with the following components.
anoms: Data frame containing timestamps, values, and optionally
expected values.
"threshold" Filter all negative anomalies and those anomalies
whose magnitude is smaller than one of the specified thresholds
which include: the median of the daily max values (med_max), the
95th percentile of the daily max values (p95), and the 99th
percentile of the daily max values (p99).
Value:
The returned value is a list with the following components.
anoms: Data frame containing timestamps, values, and optionally
expected values.
One can save "anoms" to a file in the following fashion:
write.csv(<return list name>[["anoms"]], file=<filename>)
References:
<NAME>., <NAME>. and <NAME>., (2014) "A Novel
Technique for Long-Term Anomaly Detection in the Cloud", 6th
USENIX, Philadelphia, PA.
<NAME>., (May 1983), "Percentage Points for a Generalized ESD
Many-Outlier Procedure" , Technometrics, 25(2), pp. 165-172.
See Also:
anomaly_detect_vec
Examples:
# To detect all anomalies
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both")
# To detect only the anomalies on the last day, run the following:
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="day")
# To detect only the anomalies on the last hr, run the following:
anomaly_detect_ts(raw_data, max_anoms=0.02, direction="both", only_last="hr")
'''
import numpy as np
import scipy as sp
import pandas as pd
import datetime
import statsmodels.api as sm
def anomaly_detect_ts(x: pd.Series, granularity="day", max_anoms=0.1, direction="pos", alpha=0.05, only_last=None,
threshold=None, e_value=False, longterm=False, piecewise_median_period_weeks=2,
verbose=False):
if x.size == 0:
print('Warning: passed empty series for anomalies')
return {
'anoms':
|
pd.Series()
|
pandas.Series
|
from multiprocessing.dummy import Pool
import requests
import pandas as pd
from requests.models import Response
def on_success(r: Response):
if r.status_code == 200:
print(f'Post succeed: {r.json()}')
else:
print(f'Post failed: {r}')
def on_error(ex: Exception):
print(f'Post requests failed: {ex}')
# Creates a pool with 80 threads; more threads = more concurrency.
pool = Pool(80)
# "pool" is a module attribute; you can be sure there will only
# be one of them in your application
# as modules are cached after initialization.
if __name__ == '__main__':
futures_bbb = []
futures_yp = []
test_size = 5
with open('valid_zipcodes.csv', 'r') as f:
locations = [line.strip() for line in f.readlines()]
# locations = ["85033", "90001", "84044", "33101", "68001"]
categories = ["Concrete",
"Flooring",
"Glass",
"Doors",
"Tree Services",
"Interior Cleaning"]
for category in categories:
for location in (locations):
location = location.zfill(5)
futures_bbb.append(pool.apply_async(requests.post,
args=[
'https://82ip2yupkh.execute-api.us-west-1.amazonaws.com/stage/runbbb'],
kwds={'json': {
"country": "USA", "location": location,
"category": category}},
callback=on_success, error_callback=on_error))
futures_yp.append(pool.apply_async(requests.post,
args=[
'https://82ip2yupkh.execute-api.us-west-1.amazonaws.com/stage/runyp'],
kwds={'json': {"country": "USA", "location": location,
"category": category}},
callback=on_success, error_callback=on_error))
res_bbb = []
    # futures_bbb now holds one future per (category, location) request.
for future in futures_bbb:
try:
# For each future, wait until the request is
res_bbb.append(future.get().json())
# finished and then print the response object.
except Exception as e:
print(e)
pass
res_yp = []
    # futures_yp now holds one future per (category, location) request.
for future in futures_yp:
try:
# For each future, wait until the request is
res_yp.append(future.get().json())
# finished and then print the response object.
except Exception as e:
# print(e)
pass
df = pd.DataFrame(res_bbb)
df.to_csv(f"test_{test_size}_zipcodes_results_bbb.csv", index=False)
df =
|
pd.DataFrame(res_yp)
|
pandas.DataFrame
|
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2020/01/02 17:37
Desc: fetch data from Jiaoyifamen - Tools: https://www.jiaoyifamen.com/tools/
Jiaoyifamen home page: https://www.jiaoyifamen.com/
# Jiaoyifamen - Tools - Arbitrage Analysis
Jiaoyifamen - Tools - Arbitrage Analysis - calendar spread (free spread)
Jiaoyifamen - Tools - Arbitrage Analysis - free price ratio
Jiaoyifamen - Tools - Arbitrage Analysis - multi-leg combination
Jiaoyifamen - Tools - Arbitrage Analysis - FullCarry
Jiaoyifamen - Tools - Arbitrage Analysis - arbitrage spread matrix
# Jiaoyifamen - Tools - News Aggregation
Jiaoyifamen - Tools - News Aggregation - research report query
Jiaoyifamen - Tools - News Aggregation - trading calendar
# Jiaoyifamen - Tools - Position Analysis
Jiaoyifamen - Tools - Position Analysis - futures positions
Jiaoyifamen - Tools - Position Analysis - seat positions
Jiaoyifamen - Tools - Position Analysis - position seasonality
# Jiaoyifamen - Tools - Fund Analysis
Jiaoyifamen - Tools - Fund Analysis - fund flow
Jiaoyifamen - Tools - Fund Analysis - committed funds
Jiaoyifamen - Tools - Fund Analysis - fund seasonality
Jiaoyifamen - Tools - Fund Analysis - turnover ranking
# Jiaoyifamen - Tools - Seat Analysis
Jiaoyifamen - Tools - Seat Analysis - position structure
Jiaoyifamen - Tools - Seat Analysis - position cost
Jiaoyifamen - Tools - Seat Analysis - position building process
# Jiaoyifamen - Tools - Warehouse Receipt Analysis
Jiaoyifamen - Tools - Warehouse Receipt Analysis - warehouse receipt daily report
Jiaoyifamen - Tools - Warehouse Receipt Analysis - warehouse receipt query
Jiaoyifamen - Tools - Warehouse Receipt Analysis - virtual/physical ratio daily report
Jiaoyifamen - Tools - Warehouse Receipt Analysis - virtual/physical ratio query
# Jiaoyifamen - Tools - Term Structure Analysis
Jiaoyifamen - Tools - Term Structure Analysis - basis daily report
Jiaoyifamen - Tools - Term Structure Analysis - basis analysis
Jiaoyifamen - Tools - Term Structure Analysis - term structure
Jiaoyifamen - Tools - Term Structure Analysis - price seasonality
# Jiaoyifamen - Tools - Market Analysis
Jiaoyifamen - Tools - Market Analysis - market data
# Jiaoyifamen - Tools - Trading Rules
Jiaoyifamen - Tools - Trading Rules - position limits
Jiaoyifamen - Tools - Trading Rules - warehouse receipt validity period
Jiaoyifamen - Tools - Trading Rules - product handbook
"""
import time
import matplotlib.pyplot as plt
import pandas as pd
import requests
from mssdk.futures_derivative.cons import (
csa_payload,
csa_url_spread,
csa_url_ratio,
csa_url_customize,
)
from mssdk.futures_derivative.jyfm_login_func import jyfm_login
# pd.set_option('display.max_columns', None)
# Jiaoyifamen - Tools - Arbitrage Analysis
def jyfm_tools_futures_spread(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
    Jiaoyifamen - Tools - Arbitrage Analysis - calendar spread (free spread)
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_spread, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_ratio(
type_1="RB", type_2="RB", code_1="01", code_2="05", headers="", plot=True
):
"""
    Jiaoyifamen - Tools - Arbitrage Analysis - free price ratio
:param type_1: str
:param type_2: str
:param code_1: str
:param code_2: str
:param plot: Bool
:return: pandas.Series or pic
2013-01-04 -121
2013-01-07 -124
2013-01-08 -150
2013-01-09 -143
2013-01-10 -195
...
2019-10-21 116
2019-10-22 126
2019-10-23 123
2019-10-24 126
2019-10-25 134
"""
csa_payload_his = csa_payload.copy()
csa_payload_his.update({"type1": type_1})
csa_payload_his.update({"type2": type_2})
csa_payload_his.update({"code1": code_1})
csa_payload_his.update({"code2": code_2})
res = requests.get(csa_url_ratio, params=csa_payload_his, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_customize(
formula="RB01-1.6*I01-0.5*J01-1200", headers="", plot=True
):
"""
    Jiaoyifamen - Tools - Arbitrage Analysis - multi-leg combination
:param formula: str
:param plot: Bool
:return: pandas.Series or pic
"""
params = {"formula": formula}
res = requests.get(csa_url_customize, params=params, headers=headers)
data_json = res.json()
data_df = pd.DataFrame([data_json["category"], data_json["value"]]).T
data_df.index = pd.to_datetime(data_df.iloc[:, 0])
data_df = data_df.iloc[:, 1]
data_df.name = "value"
if plot:
data_df.plot()
plt.legend(loc="best")
plt.xlabel("date")
plt.ylabel("value")
plt.show()
return data_df
else:
return data_df
def jyfm_tools_futures_full_carry(
begin_code="05", end_code="09", ratio="4", headers=""
):
"""
    Jiaoyifamen - Tools - Arbitrage Analysis - FullCarry
    https://www.jiaoyifamen.com/tools/future/full/carry?beginCode=05&endCode=09&ratio=4
    Note: the forward carry cost mainly consists of storage fees and funding cost; commissions are negligible and therefore ignored. VAT is uncertain and also excluded. When using this table, mind warehouse-receipt validity, premiums/discounts, and the extra costs of fresh-produce contracts. The actual Full Carry level is slightly higher than estimated here.
    :param begin_code: start month
    :type begin_code: str
    :param end_code: end month
    :type end_code: str
    :param ratio: percentage, given here as an absolute value
    :type ratio: str
    :param headers: request headers
    :type headers: dict
    :return: estimated forward-market carry cost
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/full/carry"
params = {
"beginCode": begin_code,
"endCode": end_code,
"ratio": ratio,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["table_data"])
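# Hedged usage sketch: most jyfm endpoints require a logged-in session, so the
# cookie-bearing headers come from jyfm_login (imported above). The credential
# values are placeholders, and the exact login signature is not asserted here.
# headers = jyfm_login(...)  # fill in your own account credentials
# full_carry_df = jyfm_tools_futures_full_carry(begin_code="05", end_code="09", ratio="4", headers=headers)
# print(full_carry_df.head())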
def jyfm_tools_futures_arbitrage_matrix(
category="1", type1="RB", type2="RB", headers=""
):
"""
    Jiaoyifamen - Tools - Arbitrage Analysis - arbitrage spread matrix
    https://www.jiaoyifamen.com/tools/future/arbitrage/matrix
    :param category: 1: calendar spread; 2: free spread; 3: free price ratio
    :type category: str
    :param type1: first product
    :type type1: str
    :param type2: second product
    :type type2: str
    :param headers: request headers
    :type headers: dict
    :return: the corresponding matrix
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/future/arbitrage/matrix"
params = {
"category": category,
"type1": type1,
"type2": type2,
"_": "1583846468579",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_exchange_symbol_dict():
jyfm_exchange_symbol_dict_inner = {
"中国金融期货交易所": {
"TF": "五债",
"T": "十债",
"IC": "中证500",
"IF": "沪深300",
"IH": "上证50",
"TS": "二债",
},
"郑州商品交易所": {
"FG": "玻璃",
"RS": "菜籽",
"CF": "棉花",
"LR": "晚稻",
"CJ": "红枣",
"JR": "粳稻",
"ZC": "动力煤",
"TA": "PTA",
"SA": "纯碱",
"AP": "苹果",
"WH": "强麦",
"SF": "硅铁",
"MA": "甲醇",
"CY": "棉纱",
"RI": "早稻",
"OI": "菜油",
"SM": "硅锰",
"RM": "菜粕",
"UR": "尿素",
"PM": "普麦",
"SR": "白糖",
},
"大连商品交易所": {
"PP": "PP",
"RR": "粳米",
"BB": "纤板",
"A": "豆一",
"EG": "乙二醇",
"B": "豆二",
"C": "玉米",
"JM": "焦煤",
"I": "铁矿",
"J": "焦炭",
"L": "塑料",
"M": "豆粕",
"P": "棕榈",
"CS": "淀粉",
"V": "PVC",
"Y": "豆油",
"JD": "鸡蛋",
"FB": "胶板",
"EB": "苯乙烯",
},
"上海期货交易所": {
"SS": "不锈钢",
"RU": "橡胶",
"AG": "沪银",
"AL": "沪铝",
"FU": "燃油",
"RB": "螺纹",
"CU": "沪铜",
"PB": "沪铅",
"BU": "沥青",
"AU": "沪金",
"ZN": "沪锌",
"SN": "沪锡",
"HC": "热卷",
"NI": "沪镍",
"WR": "线材",
"SP": "纸浆",
},
"上海国际能源交易中心": {"SC": "原油", "NR": "20号胶"},
}
return jyfm_exchange_symbol_dict_inner
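# Minimal sketch: looking up the Chinese short name of a futures symbol from the
# static exchange/symbol mapping returned above.
_symbol_map = jyfm_exchange_symbol_dict()
print(_symbol_map["上海期货交易所"]["RB"])  # -> "螺纹" (rebar, Shanghai Futures Exchange)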
# Jiaoyifamen - Tools - News Aggregation
def jyfm_tools_research_query(limit="100", headers=""):
"""
    Jiaoyifamen - Tools - News Aggregation - research report query
    https://www.jiaoyifamen.com/tools/research/qryPageList
    :param limit: number of rows to return
    :type limit: str
    :return: research report information
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/research/qryPageList"
params = {
"page": "1",
"limit": limit,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_trade_calendar(trade_date="2020-01-03", headers=""):
"""
    Jiaoyifamen - Tools - News Aggregation - trading calendar
    This function can also return calendar data for future trading days
    https://www.jiaoyifamen.com/tools/trade-calendar/events
    :param trade_date: the trading day to query
    :type trade_date: str
    :return: trading calendar data for the specified trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/trade-calendar/events"
params = {
"page": "1",
"limit": "1000",
"day": trade_date,
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
# Jiaoyifamen - Tools - Position Analysis
def jyfm_tools_position_detail(
symbol="JM", code="jm2005", trade_date="2020-01-03", headers=""
):
"""
    Jiaoyifamen - Tools - Position Analysis - futures positions
    :param symbol: product symbol
    :type symbol: str
    :param code: contract code
    :type code: str
    :param trade_date: trading day
    :type trade_date: str
    :param headers: headers with cookies
    :type headers: dict
    :return: futures position data for the given product, contract, and trading day
:rtype: pandas.DataFrame
"""
url = f"https://www.jiaoyifamen.com/tools/position/details/{symbol}?code={code}&day={trade_date}&_=1578040551329"
res = requests.get(url, headers=headers)
return pd.DataFrame(res.json()["short_rank_table"])
def jyfm_tools_position_seat(seat="永安期货", trade_date="2020-01-03", headers=""):
"""
    Jiaoyifamen - Tools - Position Analysis - seat positions
    :param seat: futures broker (seat) name
    :type seat: str
    :param trade_date: trading day
    :type trade_date: str
    :param headers: headers with cookies
    :type headers: dict
    :return: seat position data for the given broker and trading day
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/seat"
params = {
"seat": seat,
"day": trade_date,
"type": "",
"_": "1578040989932",
}
res = requests.get(url, params=params, headers=headers)
return pd.DataFrame(res.json()["data"])
def jyfm_tools_position_season(symbol="RB", code="05", headers=""):
"""
    Jiaoyifamen - Tools - Position Analysis - position seasonality
    https://www.jiaoyifamen.com/tools/position/season
    :param symbol: product symbol
    :type symbol: str
    :param code: contract month
    :type code: str
    :param headers: headers with cookies
    :type headers: dict
    :return: seasonal pattern of the contract's open interest
:rtype: pandas.DataFrame
"""
url = "https://www.jiaoyifamen.com/tools/position/season"
params = {
"type": symbol,
"code": code,
}
res = requests.get(url, params=params, headers=headers)
data_json = res.json()
temp_df = pd.DataFrame(
[
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
columns=data_json["dataCategory"],
).T
temp_df.columns = ["2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"]
return temp_df
# Jiaoyifamen - Tools - Fund Analysis
def jyfm_tools_position_fund_direction(
trade_date="2020-02-24", indicator="期货品种资金流向排名", headers=""
):
"""
    Jiaoyifamen - Tools - Fund Analysis - fund flow
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: trading day
    :type trade_date: str
    :param indicator: "期货品种资金流向排名" (fund flow ranking by product) or "期货主力合约资金流向排名" (fund flow ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: fund flow data for the given trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种资金流向排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["flowCategory"]),
data_json["flowCategory"],
data_json["flowValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantFlowCategory"]),
data_json["dominantFlowCategory"],
data_json["dominantFlowValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_down(
trade_date="2020-02-24", indicator="期货品种沉淀资金排名", headers=""
):
"""
    Jiaoyifamen - Tools - Fund Analysis - committed funds
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: trading day
    :type trade_date: str
    :param indicator: "期货品种沉淀资金排名" (committed funds ranking by product) or "期货主力合约沉淀资金排名" (committed funds ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: committed funds for the given trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种沉淀资金排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["precipitationCategory"]),
data_json["precipitationCategory"],
data_json["precipitationValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]]
* len(data_json["dominantPrecipitationCategory"]),
data_json["dominantPrecipitationCategory"],
data_json["dominantPrecipitationValue"],
],
index=["date", "symbol", "fund"],
).T
def jyfm_tools_position_fund_season(symbol="RB", code="05", headers=""):
"""
    Jiaoyifamen - Tools - Fund Analysis - fund seasonality
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param symbol: product symbol
    :type symbol: str
    :param code: contract expiry month
    :type code: str
    :param headers: headers with cookies
    :type headers: dict
    :return: fund seasonality for the given product and contract month
:rtype: pandas.DataFrame
"""
params = {
"type": symbol,
"code": code,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/season"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
data_df = pd.DataFrame(
[
data_json["dataCategory"],
data_json["year2013"],
data_json["year2014"],
data_json["year2015"],
data_json["year2016"],
data_json["year2017"],
data_json["year2018"],
data_json["year2019"],
data_json["year2020"],
],
index=["date", "2013", "2014", "2015", "2016", "2017", "2018", "2019", "2020"],
).T
return data_df
def jyfm_tools_position_fund_deal(
trade_date="2020-02-24", indicator="期货品种成交量排名", headers=""
):
"""
    Jiaoyifamen - Tools - Fund Analysis - turnover ranking
    https://www.jiaoyifamen.com/tools/position/fund/?day=2020-01-08
    :param trade_date: trading day
    :type trade_date: str
    :param indicator: "期货品种成交量排名" (volume ranking by product) or "期货主力合约成交量排名" (volume ranking by dominant contract)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: turnover ranking for the given trading day
:rtype: pandas.DataFrame
"""
params = {
"day": trade_date,
}
url = "https://www.jiaoyifamen.com/tools/position/fund/"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "期货品种成交量排名":
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["turnOverCategory"]),
data_json["turnOverCategory"],
data_json["turnOverValue"],
],
index=["date", "symbol", "fund"],
).T
else:
return pd.DataFrame(
[
[data_json["tradingDay"]] * len(data_json["dominantTurnOverCategory"]),
data_json["dominantTurnOverCategory"],
data_json["dominantTurnOverValue"],
],
index=["date", "symbol", "fund"],
).T
# Jiaoyifamen - Tools - Seat Analysis - position structure
def jyfm_tools_position_structure(
trade_date="2020-03-02", seat="永安期货", indicator="持仓变化", headers=""
):
"""
    Jiaoyifamen - Tools - Seat Analysis - position structure
    https://www.jiaoyifamen.com/tools/position/seat
    :param trade_date: trading day
    :type trade_date: str
    :param seat: broker name, e.g., seat="永安期货"
    :type seat: str
    :param indicator: "持仓变化" (position change), "净持仓分布" (net position distribution), or "总持仓分布" (total position distribution)
    :type indicator: str
    :param headers: headers with cookies
    :type headers: dict
    :return: position structure of the given broker on the given trading day
:rtype: pandas.DataFrame
"""
indicator_dict = {"持仓变化": 1, "净持仓分布": 2, "总持仓分布": 3}
params = {
"seat": seat,
"day": trade_date,
"type": indicator_dict[indicator],
"_": int(time.time() * 1000),
}
url = "https://www.jiaoyifamen.com/tools/position/struct"
r = requests.get(url, params=params, headers=headers)
data_json = r.json()
if indicator == "持仓变化":
return
|
pd.DataFrame(data_json["varieties"])
|
pandas.DataFrame
|
#Creating dashboard of covid case
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
#import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import numpy as np
import datetime as dt
from datetime import datetime
#Austin: http://www.austintexas.gov/COVID19
#Dallas: https://www.dallascounty.org/covid-19/
#Harris: http://publichealth.harriscountytx.gov/Resources/2019-Novel-Coronavirus/Harris-County-COVID-19-Confirmed-Cases
#Texas : https://txdshs.maps.arcgis.com/apps/opsdashboard/index.html#/ed483ecd702b4298ab01e8b9cafc8b83
#<NAME> data: https://github.com/CSSEGISandData/COVID-19
#https://public.tableau.com/profile/christopher.paolini#!/vizhome/COVID-19Dashboard_15850633730350/UnitedStatesCOVID-19CaseTracker
#url = 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'
# https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv'
def createLayout(title, xlabel, ylabel):
layout = go.Layout(
title={'text':title,
'x':0.5,'y':0.9,
'xanchor':'center','yanchor':'top'
},
yaxis=dict(
title=ylabel,
linecolor=colors['text'],
linewidth=2,
mirror=True,
showgrid=False,ticks='outside',fixedrange=True,automargin=False,
title_standoff=200,
constrain="domain"),
yaxis2=dict(
title='New Cases',
overlaying='y',
side='right',
showgrid=False, ticks='outside', fixedrange=True, automargin=True),
xaxis=dict(linewidth=2,linecolor=colors['text'],mirror=True,showgrid=False,ticks='outside', fixedrange=True,automargin=True),
xaxis_title=xlabel,
autosize=True,
paper_bgcolor=colors['background'],
plot_bgcolor=colors['plotbg'],
font=dict(color=colors['text'],size=14, family='Sans Serif'),
legend=dict(x=0.02,y=0.85,bgcolor=colors['plotbg'],orientation='v'),
)
return layout
def layoutUpdate(fig, pattern1, pattern2, logticks):
fig.update_layout(
updatemenus=[
dict(
type="buttons",
direction="left",
buttons=list([
dict(
args=[{'visible':pattern1},
{'yaxis':{'type':'linear', 'title':'Total', 'ticks':'outside', 'fixedrange':True, 'automargin':True,
'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],
label="linear",
method="update",
),
dict(
args=[{'visible':pattern2},
{'yaxis':{'type':'log', 'title':'Total', 'tickvals':logticks, 'ticks':'outside', 'fixedrange':True, 'automargin':True,
'linewidth':2, 'mirror':True, 'linecolor':colors['text']}}],
#{'yaxis':{'visible':[False,True]}}],
label="log",
method="update"
)
]),
pad={"r": 10, "t": 10},
showactive=True,
bgcolor='white',
x=0.02,
xanchor="auto",
y=1,
yanchor="auto"
)
]
)
return fig
def getSummary(cases):
casesToday = int(cases[len(cases)-1])
newCasesToday = int(np.diff(cases)[-1])
newCasesYesterday = int(np.diff(cases)[-2])
if newCasesToday > newCasesYesterday:
caseIncrease = colors['up']
else:
caseIncrease = colors['down']
return casesToday, newCasesToday, caseIncrease
def getBestFit(data, dates, startpt, tau):
#Get most recent date and increment by 1
lstdate = (data['Date'][len(data)-1])
lstdate = datetime.strptime(lstdate, '%m/%d/%y')
lstdate += dt.timedelta(days=1)
lstdate = lstdate.strftime('%m/%d/%y')
#Use multiple best fit lines
yt = 0
for idx, d in enumerate(dates):
if idx == len(dates)-1: #If at the end
xt = np.arange(len(data['Date'][d:])+1)
else:
xt = np.arange(len(data['Date'][d:dates[idx+1]]))
#xt = np.arange(len(data['Date'])+1)
yt_temp = np.round( np.exp(xt*tau[idx])*startpt[idx])
if idx == 0:
yt = yt_temp
else:
yt = np.append(yt, yt_temp)
newdate = (data['Date'].copy())
    newdate = pd.concat([newdate, pd.Series(lstdate)])
return newdate, yt
colors = {
'background': '#F5F5F5',
'text': '#484848',
'plotbg': '#FDFDFD',
'up': 'red',
'down': 'green'
}
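# Minimal sketch: getSummary on a toy cumulative-case series. With the colors
# defined above, a daily increase larger than yesterday's maps to 'red'.
import numpy as np
_toy_cases = np.array([10, 14, 20, 29])
_total, _new_today, _trend_color = getSummary(_toy_cases)
print(_total, _new_today, _trend_color)  # 29 9 red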
#Load all data
gr = pd.read_csv('gr_rate.csv')
texascases = pd.read_csv('texascases.csv')
austincases =
|
pd.read_excel('AustinCases.xlsx', sheet_name='Austin')
|
pandas.read_excel
|
import pandas as pd
class Dashboard:
def __init__(self):
pass
@classmethod
    def get_suite_statistics(cls, suite_list):
suite_data_frame = pd.DataFrame.from_records(suite_list)
suite_stats = {
"Total" : (suite_data_frame.Name).count(),
"Pass" : (suite_data_frame.Status == 'PASS').sum(),
"Fail" : (suite_data_frame.Status == 'FAIL').sum(),
"Skip" : (suite_data_frame.Status == 'SKIP').sum(),
"Time" : (suite_data_frame.Time).sum(),
"Min" : (suite_data_frame.Time).min(),
"Max" : (suite_data_frame.Time).max(),
"Avg" : (suite_data_frame.Time).mean()
}
return suite_stats
@classmethod
    def get_test_statistics(cls, test_list):
test_data_frame = pd.DataFrame.from_records(test_list)
test_stats = {
"Total" : (test_data_frame.Status).count(),
"Pass" : (test_data_frame.Status == 'PASS').sum(),
"Fail" : (test_data_frame.Status == 'FAIL').sum(),
"Skip" : (test_data_frame.Status == 'SKIP').sum(),
"Time" : (test_data_frame.Time).sum(),
"Min" : (test_data_frame.Time).min(),
"Max" : (test_data_frame.Time).max(),
"Avg" : (test_data_frame.Time).mean()
}
return test_stats
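    # Hedged usage sketch (kept as comments because the class continues below): the
    # record fields 'Name', 'Status', 'Time' follow the attribute access used above,
    # and the values are invented for illustration.
    #   suites = [{'Name': 'smoke', 'Status': 'PASS', 'Time': 12.5},
    #             {'Name': 'regression', 'Status': 'FAIL', 'Time': 40.0}]
    #   Dashboard.get_suite_statistics(suites)
    #   -> {'Total': 2, 'Pass': 1, 'Fail': 1, 'Skip': 0, 'Time': 52.5, 'Min': 12.5, 'Max': 40.0, 'Avg': 26.25}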
@classmethod
    def get_keyword_statistics(cls, kw_list):
kw_data_frame =
|
pd.DataFrame.from_records(kw_list)
|
pandas.DataFrame.from_records
|
#============================================================================#
def show_values_on_bars(axs, vertical=True, space=0.4):
'''
Show Values on a bar chart.
Parameters
----------
axs : plt.axes
Axes matplotlib.
vertical : BOOL, optional
Show values on a vertical barplot. The default is True.
space : FLOAT, optional
        Space between the end of the bar and the value. The default is 0.4.
Returns
-------
None.
'''
import numpy as np
def _show_on_single_plot(ax):
if vertical == True:
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height() + space
value = int(p.get_height())
ax.text(_x, _y, value, ha="center")
elif vertical == False:
for p in ax.patches:
_x = p.get_x() + p.get_width() + space
_y = p.get_y() + p.get_height() / 2
value = int(p.get_width())
ax.text(_x, _y, value, ha="left")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
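# Hedged usage sketch (not part of the original module): annotate a small
# seaborn barplot with its bar heights. Assumes matplotlib and seaborn are
# installed; the data is invented.
def _demo_show_values_on_bars():
    import matplotlib.pyplot as plt
    import seaborn as sns
    fig, ax = plt.subplots()
    sns.barplot(x=['a', 'b', 'c'], y=[3, 7, 5], ax=ax)
    show_values_on_bars(ax, vertical=True, space=0.4)
    plt.show()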
#============================================================================#
def display_filling_of_col(df, line=0, color='#3556C8', figsize=(8, 5), show_values=False):
'''
Display the filling of columns in a Dataframe.
Parameters
----------
df : Dataframe
Dataframe.
line : INT, optional
        Number of lines to display. The default is 0, which displays all lines.
color : COLOR, optional
Color of the plot. The default is '#3556C8'.
figsize : TUPLE, optional
Size of the plot. The default is (8, 5).
show_values : BOOL, optional
Show values. The default is False.
Returns
-------
None.
'''
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df_tmp = pd.DataFrame()
for col in df.columns:
df_tmp[col] = pd.Series(df[col].count())
x = list(df_tmp.T.sort_values(by=0, ascending=False)[0] / df.shape[0] * 100)
y = list(df_tmp.T.sort_values(by=0, ascending=False).index)
fig, ax = plt.subplots(figsize=figsize)
if line == 0:
sns.barplot(x=x,
y=y,
orient='h', color=color)
else:
sns.barplot(x=x[:line],
y=y[:line],
orient='h', color=color)
if show_values == True:
show_values_on_bars(ax, vertical=False)
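# Hedged usage sketch (not part of the original module): show how filled each
# column of a toy DataFrame is (as a percentage of rows), with the values
# printed next to the bars.
def _demo_display_filling_of_col():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({'a': [1, 2, np.nan, 4],
                        'b': [np.nan, np.nan, 3, 4],
                        'c': [1, 2, 3, 4]})
    display_filling_of_col(toy, line=0, show_values=True)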
#============================================================================#
def display_cate_bar(data, var, show_values=True, figsize=(5,5), color='b'):
'''
Display the distribution of a categorical variable.
Parameters
----------
data : Dataframe
Dataframe.
var : STRING
Name of the variable to display.
show_values : BOOL, optional
Show values. The default is True.
figsize : TUPLE, optional
Size of the plot. The default is (5,5).
color : COLOR, optional
Color of the plot. The default is 'b'.
Returns
-------
None.
'''
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
value_cont = pd.DataFrame.from_dict(dict(data[var].value_counts())
,orient='index')
fig, ax = plt.subplots(figsize=figsize)
sns.barplot(x=value_cont[0],
y=value_cont.index,
color=color,
orient='h')
if show_values:
show_values_on_bars(ax, vertical=False)
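# Hedged usage sketch (not part of the original module): plot the value counts
# of a toy categorical column, with the counts shown next to the bars.
def _demo_display_cate_bar():
    import pandas as pd
    toy = pd.DataFrame({'city': ['Paris', 'Lyon', 'Paris', 'Nice', 'Paris']})
    display_cate_bar(toy, 'city', show_values=True, figsize=(5, 3), color='b')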
#============================================================================#
def downcast(df):
'''
    This function tries to downcast integer and floating-point columns
    to the smallest corresponding numerical dtype.
    It returns a dictionary of the dtypes that were actually downcast.
Parameters
----------
df : Dataframe
Dataframe to downcast.
Returns
-------
    dict_dtypes : DICT
        Dictionary of the dtypes that were actually downcast.
'''
import pandas as pd
# initialise the dict of downcasted dtypes for features
dict_dtypes = {}
# getting list of integer columns
columns_int = df.select_dtypes(include=['integer']).columns
for column in columns_int:
old_dtype = str(df[column].dtypes)
# trying to downcast integer columns (np.int8 to np.int64)
df[column] =
|
pd.to_numeric(df[column], downcast='integer')
|
pandas.to_numeric
|
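# Hedged illustration (standalone, not part of the truncated function above):
# what pd.to_numeric with downcast='integer' does to an int64 column — the
# values are unchanged, only the dtype shrinks to the smallest integer type
# that can hold them.
import pandas as pd
s = pd.Series([0, 1, 2], dtype='int64')
s_small = pd.to_numeric(s, downcast='integer')
print(s.dtype, '->', s_small.dtype)  # int64 -> int8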
import pandas as pd
# Used to plot the results
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
url = 'http://samplecsvs.s3.amazonaws.com/SalesJan2009.csv'
# Use pandas to import data
orig_df = pd.read_csv(url)
# To keep original dataframe for referencing
df = orig_df.copy()
print(df.head())
print(df.dtypes)
# Convert object types
df['Transaction_date'] = pd.to_datetime(df['Transaction_date'])
df['Account_Created'] = pd.to_datetime(df['Account_Created'])
df['Last_Login'] = pd.to_datetime(df['Last_Login'])
print(df.dtypes)
# Convert prices to numeric in order to access math functions
df['Price'] = df['Price'].str.replace(',','')
df['Price'] =
|
pd.to_numeric(df['Price'])
|
pandas.to_numeric
|
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
    check_dates : bool, default False
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
        Whether the method "opname" has the kwarg "skipna"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns containing NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
        # when the dtypes of the pandas Series differ,
        # the underlying ndarray will have dtype=object,
        # so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
# unit test when have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected =
|
Series(0, index=df.columns)
|
pandas.Series
|
import filecmp
import os
import pandas as pd
import pytest
import sas7bdat_converter.converter as converter
import shutil
import xlrd
from pathlib import Path
from glob import glob
current_dir = Path().absolute()
def test_batch_to_csv(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.csv')
converted_file_2 = Path(tmpdir).joinpath('file2.csv')
converted_file_3 = Path(tmpdir).joinpath('file3.csv')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_csv(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
assert files_created
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.csv'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.csv'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.csv'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_csv_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_csv(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_batch_to_excel(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.xlsx')
converted_file_2 = Path(tmpdir).joinpath('file2.xlsx')
converted_file_3 = Path(tmpdir).joinpath('file3.xlsx')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_excel(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
    assert files_created
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.xlsx'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': '<KEY>'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': '<KEY>'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_excel_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_excel(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_batch_to_json(tmpdir, sas_file_1, sas_file_2, sas_file_3):
converted_file_1 = Path(tmpdir).joinpath('file1.json')
converted_file_2 = Path(tmpdir).joinpath('file2.json')
converted_file_3 = Path(tmpdir).joinpath('file3.json')
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3},
]
converter.batch_to_json(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
    assert files_created
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.json'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': 'test.json'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': 'test.json'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_json_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_json(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
optionals = [
{},
{'root_node': 'root'},
{'first_node': 'item'},
{'root_node': 'root', 'first_node': 'item'},
]
@pytest.mark.parametrize('optional', optionals)
def test_batch_to_xml(tmpdir, sas_file_1, sas_file_2, sas_file_3, optional):
converted_file_1 = Path(tmpdir).joinpath('file1.xml')
converted_file_2 = Path(tmpdir).joinpath('file2.xml')
converted_file_3 = Path(tmpdir).joinpath('file3.xml')
if optional.get('root_node') and optional.get('first_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'root_node': optional.get('root_node'),
'first_node': optional.get('first_node'),
},
]
elif optional.get('root_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'root_node': optional.get('root_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'root_node': optional.get('root_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'root_node': optional.get('root_node'),
},
]
elif optional.get('first_node'):
file_dict = [
{
'sas7bdat_file': sas_file_1,
'export_file': converted_file_1,
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_2,
'export_file': converted_file_2,
'first_node': optional.get('first_node'),
},
{
'sas7bdat_file': sas_file_3,
'export_file': converted_file_3,
'first_node': optional.get('first_node'),
},
]
else:
file_dict = [
{'sas7bdat_file': sas_file_1, 'export_file': converted_file_1,},
{'sas7bdat_file': sas_file_2, 'export_file': converted_file_2,},
{'sas7bdat_file': sas_file_3, 'export_file': converted_file_3,},
]
converter.batch_to_xml(file_dict)
files_created = False
if (converted_file_1.is_file() and
converted_file_2.is_file() and
converted_file_3.is_file()):
files_created = True
    assert files_created
file_dicts = [
[{'bad_key': 'test.sas7bdat', 'export_file': 'test.xml'}],
[{'sas7bdat_file': 'test.sas7bdat', 'bad_key': '<KEY>'}],
[{'sas_bad_key': 'test.sas7bdate', 'export_bad_key': '<KEY>'}],
[{'sas7bdat_file': 'test.sas7bdat', 'export_file': 'test.xml', 'root_node': 'test', 'bad': 'test'}],
[{'sas7bdat_file': 'test.sas7bdat', 'export_file': 'test.xml', 'bad': 'test', 'first_node': 'test'}],
]
@pytest.mark.parametrize('file_dict', file_dicts)
def test_batch_to_xml_invalid_key(file_dict):
with pytest.raises(KeyError) as execinfo:
converter.batch_to_xml(file_dict)
assert 'Invalid key provided' in str(execinfo.value)
def test_dir_to_csv_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_csv(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.csv'])
assert sas_counter == convert_counter
def test_dir_to_csv_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_csv(dir_path=str(sas7bdat_dir), export_path=str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.csv'])
assert sas_counter == convert_counter
def test_dir_to_excel_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_excel(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xlsx'])
assert sas_counter == convert_counter
def test_dir_to_excel_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_excel(dir_path=str(sas7bdat_dir), export_path=str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xlsx'])
assert sas_counter == convert_counter
def test_dir_to_json_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_json(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.json'])
assert sas_counter == convert_counter
def test_dir_to_json_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_json(str(sas7bdat_dir), str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.json'])
assert sas_counter == convert_counter
def test_dir_to_xml_same_dir(tmpdir, sas7bdat_dir):
sas_files = [str(x) for x in sas7bdat_dir.iterdir()]
for sas_file in sas_files:
shutil.copy(sas_file, tmpdir)
converter.dir_to_xml(str(tmpdir))
    sas_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xml'])
assert sas_counter == convert_counter
def test_dir_to_xml_different_dir(tmpdir, sas7bdat_dir):
converter.dir_to_xml(str(sas7bdat_dir), str(tmpdir))
    sas_counter = len([name for name in Path(sas7bdat_dir).iterdir() if name.suffix == '.sas7bdat'])
    convert_counter = len([name for name in Path(tmpdir).iterdir() if name.suffix == '.xml'])
assert sas_counter == convert_counter
exception_data = [
('sas7bdat conversion error - Valid extension for to_csv conversion is: .csv', ['.csv'], 'to_csv'),
('sas7bdat conversion error - Valid extensions for to_csv conversion are: .csv, .txt', ['.csv', '.txt'], 'to_csv'),
]
@pytest.mark.parametrize('exception', exception_data)
def test_file_extension_exception_message(exception):
valid_message = exception[0]
valid_extensions = exception[1]
test_message = converter._file_extension_exception_message(exception[2], valid_extensions)
assert valid_message == test_message
def test_invalid_key_exception_message_no_optional():
valid_message = 'Invalid key provided, expected keys are: sas7bdat_file, export_file'
required_keys = ['sas7bdat_file', 'export_file']
test_message = converter._invalid_key_exception_message(required_keys=required_keys)
assert valid_message == test_message
def test_invalid_key_exception_message_optional():
valid_message = 'Invalid key provided, expected keys are: sas7bdat_file, export_file and optional keys are: root_node, first_node'
required_keys = ['sas7bdat_file', 'export_file']
optional_keys = ['root_node', 'first_node']
test_message = converter._invalid_key_exception_message(required_keys=required_keys, optional_keys=optional_keys)
assert valid_message == test_message
@pytest.mark.parametrize('data', [
(('.txt', '.csv',), '.xml'),
(('.sas7bdat',), '.json'),
])
def test_is_valid_extension_false(data):
valid_extensions = data[0]
file_extension = data[1]
assert not converter._is_valid_extension(valid_extensions, file_extension)
@pytest.mark.parametrize('data', [
(('.txt', '.csv',), '.csv'),
(('.sas7bdat',), '.sas7bdat'),
])
def test_is_valid_extension_true(data):
valid_extensions = data[0]
file_extension = data[1]
assert converter._is_valid_extension(valid_extensions, file_extension)
@pytest.fixture(params=['sas_file_1', 'sas_file_2', 'sas_file_3'])
def test_to_csv(tmpdir, request, expected_dir):
sas_file = request.getfixturevalue(request.param)
converted_file = Path(tmpdir).joinpath('file1.csv')
expected_file = expected_dir.joinpath('file1.csv')
converter.to_csv(sas_file, converted_file)
assert filecmp.cmp(converted_file, expected_file, shallow=False)
def test_to_csv_invalid_extension():
with pytest.raises(AttributeError) as execinfo:
converter.to_csv('test.sas7bdat', 'test.bad')
assert 'sas7bdat conversion error - Valid extension' in str(execinfo.value)
def test_to_dataframe(sas_file_1):
d = {
'integer_row': [1.0, 2.0, 3.0, 4.0, 5.0,],
'text_row': [
'Some text',
'Some more text',
'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc lobortis, risus nec euismod condimentum, lectus ligula porttitor massa, vel ornare mauris arcu vel augue. Maecenas rhoncus consectetur nisl, ac convallis enim pellentesque efficitur. Praesent tristique . End of textlectus a dolor sodales, in porttitor felis auctor. Etiam dui mauris, commodo at venenatis eu, lacinia nec tellus. Curabitur dictum tincidunt convallis. Duis vestibulum mauris quis felis euismod bibendum. Nulla eget nunc arcu. Nam quis est urna. In eleifend ultricies ultrices. In lacinia auctor ex, sed commodo nisl fringilla sed. Fusce iaculis viverra eros, nec elementum velit aliquam non. Aenean sollicitudin consequat libero, eget mattis.',
'Text',
'Test',
],
'float_row': [2.5, 17.23, 3.21, 100.9, 98.6,],
'date_row': ['2018-01-02', '2018-02-05', '2017-11-21', '2016-05-19', '1999-10-25',]
}
df = pd.DataFrame(data=d)
df['date_row'] = pd.to_datetime(df['date_row'])
df = df[['integer_row', 'text_row', 'float_row', 'date_row']]
df_file = converter.to_dataframe(sas_file_1)
pd.testing.assert_frame_equal(df, df_file, check_datetimelike_compat=True)
@pytest.fixture(params=['sas_file_1', 'sas_file_2', 'sas_file_3'])
def test_to_excel(tmpdir, request, expected_dir):
sas_file = request.getfixturevalue(request.param)
converted_file = Path(tmpdir).joinpath('file1.xlsx')
expected_file = expected_dir.joinpath('file1.xlsx')
converter.to_excel(sas_file, converted_file)
df_expected = pd.read_excel(expected_file)
df_converted =
|
pd.read_excel(converted_file)
|
pandas.read_excel
|
import json
from unittest.mock import MagicMock, patch
import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from evalml.exceptions import PipelineScoreError
from evalml.model_understanding.prediction_explanations.explainers import (
abs_error,
cross_entropy,
explain_prediction,
explain_predictions,
explain_predictions_best_worst
)
from evalml.problem_types import ProblemTypes
def compare_two_tables(table_1, table_2):
assert len(table_1) == len(table_2)
for row, row_answer in zip(table_1, table_2):
assert row.strip().split() == row_answer.strip().split()
test_features = [[1], np.ones((15, 1)), pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}).iloc[0],
pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}),
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Run the stimuli used in the MEG experiment through the model and record the
activity in each layer.
"""
import torch
from torchvision import transforms
import numpy as np
import pickle
import pandas as pd
import mkl
mkl.set_num_threads(4)
from PIL import Image
from tqdm import tqdm
import network
import dataloader
data_path = './data'
classes = dataloader.WebDataset(f'{data_path}/datasets/epasana-10kwords').classes
classes = classes.append(pd.Series(['noise'], index=[10000]))
# In order to get word2vec vectors, the KORAANI class was replaced with
# KORAANIN. Switch this back, otherwise, this word will be erroneously flagged
# as being misclassified.
classes[classes == 'KORAANIN'] = 'KORAANI'
# Load the TIFF images presented in the MEG experiment and apply the
# ImageNet preprocessing transformation to them.
stimuli = pd.read_csv('stimuli.csv')  # completion API: pandas.read_csv
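# Hedged sketch of the standard ImageNet preprocessing mentioned in the comment
# above; the exact resize/crop sizes and normalization constants used in the
# original experiment are assumptions here.
from torchvision import transforms

imagenet_preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])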
import warnings
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARMA',
FutureWarning)
warnings.filterwarnings('ignore', 'statsmodels.tsa.arima_model.ARIMA',
FutureWarning)
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima_model import ARIMA
data = pd.read_pickle('./train1.pkl')  # completion API: pandas.read_pickle
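# Hedged sketch (not from the original script): a hold-out evaluation of a
# statsmodels ARIMA model scored with RMSE. The order (1, 1, 1), the test-set
# size and the layout of the pickled series are assumptions.
def _example_arima_rmse(series, order=(1, 1, 1), n_test=10):
    train, test = series[:-n_test], series[-n_test:]
    fitted = ARIMA(train, order=order).fit(disp=0)
    forecast = fitted.forecast(steps=n_test)[0]  # old ARIMA API returns (forecast, stderr, conf_int)
    return np.sqrt(mean_squared_error(test, forecast))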
# Authors: <NAME> <<EMAIL>>
# + All contributors to <https://github.com/smarie/python-azureml-client>
#
# License: 3-clause BSD, <https://github.com/smarie/python-azureml-client/blob/master/LICENSE>
from io import BytesIO # to handle byte strings
from io import StringIO # to handle unicode strings
from requests import Session
from valid8 import validate
import pandas as pd
try: # python 3.5+
from typing import Dict, Union, List, Any, Tuple
# a few predefined type hints
SwaggerModeAzmlTable = List[Dict[str, Any]]
NonSwaggerModeAzmlTable = Dict[str, Union[List[str], List[List[Any]]]]
AzmlTable = Union[SwaggerModeAzmlTable, NonSwaggerModeAzmlTable]
AzmlOutputTable = Dict[str, Union[str, AzmlTable]]
AzmlBlobTable = Dict[str, str]
except ImportError:
pass
from azure.storage.blob import BlockBlobService, ContentSettings
from azmlclient.base_databinding import csv_to_df, df_to_csv
def csv_to_blob_ref(csv_str, # type: str
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_name, # type: str
blob_path_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> AzmlBlobTable
"""
Uploads the provided CSV to the selected Blob Storage service, and returns a reference to the created blob in
case of success.
:param csv_str:
:param blob_service: the BlockBlobService to use, defining the connection string
:param blob_container: the name of the blob storage container to use. This is the "root folder" in azure blob
storage wording.
:param blob_name: the "file name" of the blob, ending with .csv or not (in which case the .csv suffix will be
appended)
:param blob_path_prefix: an optional folder prefix that will be used to store your blob inside the container.
For example "path/to/my/"
:param charset:
:return:
"""
# setup the charset used for file encoding
if charset is None:
charset = 'utf-8'
elif charset != 'utf-8':
print("Warning: blobs can be written in any charset but currently only utf-8 blobs may be read back into "
"DataFrames. We recommend setting charset to None or utf-8 ")
# validate inputs (the only one that is not validated below)
validate('csv_str', csv_str, instance_of=str)
# 1- first create the references in order to check all params are ok
blob_reference, blob_full_name = create_blob_ref(blob_service=blob_service, blob_container=blob_container,
blob_path_prefix=blob_path_prefix, blob_name=blob_name)
# -- push blob
blob_stream = BytesIO(csv_str.encode(encoding=charset))
# noinspection PyTypeChecker
blob_service.create_blob_from_stream(blob_container, blob_full_name, blob_stream,
                                         content_settings=ContentSettings(content_type='text/csv',
content_encoding=charset))
# (For old method with temporary files: see git history)
return blob_reference
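# Hedged usage sketch (not part of the library API): upload a small CSV string
# and get back the AzureML-style blob reference. The connection string and the
# container/blob names below are placeholders.
def _example_csv_upload(connection_string):
    blob_service = BlockBlobService(connection_string=connection_string)
    ref = csv_to_blob_ref("a,b\n1,2\n3,4\n",
                          blob_service=blob_service,
                          blob_container="example-container",
                          blob_name="example.csv")
    # 'ref' is a dict holding at least 'ConnectionString' and 'RelativeLocation'
    return ref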
def csvs_to_blob_refs(csvs_dict, # type: Dict[str, str]
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_path_prefix=None, # type: str
blob_name_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> Dict[str, Dict[str, str]]
"""
Utility method to push all inputs described in the provided dictionary into the selected blob storage on the cloud.
Each input is an entry of the dictionary and containing the description of the input reference as dictionary.
The string will be written to the blob using the provided charset.
Note: files created on the blob storage will have names generated from the current time and the input name, and will
    be stored in the selected blob container, under the given path prefix.
:param csvs_dict:
:param blob_service:
:param blob_container:
:param blob_path_prefix: the optional prefix that will be prepended to all created blobs in the container
:param blob_name_prefix: the optional prefix that will be prepended to all created blob names in the container
:param charset: an optional charset to be used, by default utf-8 is used
:return: a dictionary of "by reference" input descriptions as dictionaries
"""
validate('csvs_dict', csvs_dict, instance_of=dict)
if blob_name_prefix is None:
blob_name_prefix = ""
else:
validate('blob_name_prefix', blob_name_prefix, instance_of=str)
return {blobName: csv_to_blob_ref(csvStr, blob_service=blob_service, blob_container=blob_container,
blob_path_prefix=blob_path_prefix, blob_name=blob_name_prefix + blobName,
charset=charset)
for blobName, csvStr in csvs_dict.items()}
def blob_ref_to_csv(blob_reference, # type: AzmlBlobTable
blob_name=None, # type: str
encoding=None, # type: str
requests_session=None # type: Session
):
"""
Reads a CSV stored in a Blob Storage and referenced according to the format defined by AzureML, and transforms
it into a DataFrame.
:param blob_reference: a (AzureML json-like) dictionary representing a table stored as a csv in a blob storage.
:param blob_name: blob name for error messages
:param encoding: an optional encoding to use to read the blob
:param requests_session: an optional Session object that should be used for the HTTP communication
:return:
"""
validate(blob_name, blob_reference, instance_of=dict)
if encoding is not None and encoding != 'utf-8':
raise ValueError("Unsupported encoding to retrieve blobs : %s" % encoding)
if ('ConnectionString' in blob_reference.keys()) and ('RelativeLocation' in blob_reference.keys()):
# create the Blob storage client for this account
blob_service = BlockBlobService(connection_string=blob_reference['ConnectionString'],
request_session=requests_session)
# find the container and blob path
container, name = blob_reference['RelativeLocation'].split(sep='/', maxsplit=1)
# retrieve it and convert
# -- this works but is probably less optimized for big blobs that get chunked, than using streaming
blob_string = blob_service.get_blob_to_text(blob_name=name, container_name=container)
return blob_string.content
else:
raise ValueError(
'Blob reference is invalid: it should contain ConnectionString and RelativeLocation fields')
def blob_refs_to_csvs(blob_refs, # type: Dict[str, Dict[str, str]]
charset=None, # type: str
requests_session=None # type: Session
):
# type: (...) -> Dict[str, str]
"""
:param blob_refs:
:param charset:
:param requests_session: an optional Session object that should be used for the HTTP communication
:return:
"""
validate('blob_refs', blob_refs, instance_of=dict)
return {blobName: blob_ref_to_csv(csvBlobRef, encoding=charset, blob_name=blobName,
requests_session=requests_session)
for blobName, csvBlobRef in blob_refs.items()}
def df_to_blob_ref(df, # type: pd.DataFrame
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_name, # type: str
blob_path_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> Dict[str, str]
"""
Uploads the provided DataFrame to the selected Blob Storage service as a CSV file blob, and returns a reference
to the created blob in case of success.
:param df:
:param blob_service: the BlockBlobService to use, defining the connection string
:param blob_container: the name of the blob storage container to use. This is the "root folder" in azure blob
storage wording.
:param blob_name: the "file name" of the blob, ending with .csv or not (in which case the .csv suffix will be
appended)
:param blob_path_prefix: an optional folder prefix that will be used to store your blob inside the container.
For example "path/to/my/"
:param charset: the charset to use to encode the blob (default and recommended: 'utf-8')
:return:
"""
# create the csv
csv_str = df_to_csv(df, df_name=blob_name, charset=charset)
# upload it
return csv_to_blob_ref(csv_str, blob_service=blob_service, blob_container=blob_container,
blob_path_prefix=blob_path_prefix, blob_name=blob_name, charset=charset)
def dfs_to_blob_refs(dfs_dict, # type: Dict[str, pd.DataFrame]
blob_service, # type: BlockBlobService
blob_container, # type: str
blob_path_prefix=None, # type: str
blob_name_prefix=None, # type: str
charset=None # type: str
):
# type: (...) -> Dict[str, Dict[str, str]]
    validate('DataFramesDict', dfs_dict, instance_of=dict)
    if blob_name_prefix is None:
        blob_name_prefix = ""
    else:
        validate('blob_name_prefix', blob_name_prefix, instance_of=str)
    return {blob_name: df_to_blob_ref(df, blob_service=blob_service, blob_container=blob_container,
                                      blob_path_prefix=blob_path_prefix, blob_name=blob_name_prefix + blob_name,
                                      charset=charset)
            for blob_name, df in dfs_dict.items()}
def blob_ref_to_df(blob_reference, # type: AzmlBlobTable
blob_name=None, # type: str
encoding=None, # type: str
requests_session=None # type: Session
):
"""
Reads a CSV blob referenced according to the format defined by AzureML, and transforms it into a DataFrame
:param blob_reference: a (AzureML json-like) dictionary representing a table stored as a csv in a blob storage.
:param blob_name: blob name for error messages
:param encoding: an optional encoding to use to read the blob
:param requests_session: an optional Session object that should be used for the HTTP communication
:return:
"""
# TODO copy the blob_ref_to_csv method here and handle the blob in streaming mode to be big blobs
# chunking-compliant. However how to manage the buffer correctly, create the StringIO with correct encoding,
# and know the number of chunks that should be read in pandas.read_csv ? A lot to dig here to get it right...
#
# from io import TextIOWrapper
# contents = TextIOWrapper(buffer, encoding=charset, ...)
# blob = blob_service.get_blob_to_stream(blob_name=name, container_name=container, encoding=charset,
# stream=contents)
blob_content = blob_ref_to_csv(blob_reference, blob_name=blob_name, encoding=encoding,
requests_session=requests_session)
if len(blob_content) > 0:
# convert to DataFrame
return csv_to_df(StringIO(blob_content), blob_name)
else:
# empty blob > empty DataFrame
        return pd.DataFrame()  # completion API: pandas.DataFrame
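# Hedged round-trip sketch (placeholder names): push a DataFrame as a CSV blob,
# then read it back through the reference returned by df_to_blob_ref.
def _example_roundtrip(connection_string):
    blob_service = BlockBlobService(connection_string=connection_string)
    df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
    ref = df_to_blob_ref(df, blob_service=blob_service,
                         blob_container="example-container",
                         blob_name="roundtrip.csv")
    return blob_ref_to_df(ref)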
"""
上交所网页
上市公司列表
股票
上市A股
上市B股
首次发行待上市股票
暂停/终止上市公司
上市公司信息
上市公司公告
定期报告
退市整理期公司公告
限售股份解限与减持
"""
import random
import time
import logbook
import pandas as pd
from selenium.common.exceptions import NoSuchElementException, ElementNotInteractableException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select, WebDriverWait
from cnswd.utils import data_root, most_recent_path
from cnswd._seleniumwire import make_headless_browser
logger = logbook.Logger('上交所')
MAX_WAIT_SECOND = 10
CHANNEL_MAPS = {
'listedNotice_disc': '上市公司公告',
'fixed_disc': '定期报告',
'delist_disc': '退市整理期公司公告'
}
class SSEPage(object):
"""上交所Api"""
def __init__(self, download_path=data_root('download')):
self.host_url = 'http://www.sse.com.cn'
        logger.info('Initializing headless browser......')
self.driver = make_headless_browser()
self.wait = WebDriverWait(self.driver, MAX_WAIT_SECOND)
def __enter__(self):
return self
def __exit__(self, *args):
self.driver.quit()
def _goto_page(self, num, input_id, btn_id):
"""跳转到指定页数的页面
Arguments:
num {int} -- 页数
input_id {str} -- 输入页码id
btn_id {str} -- 命令按钮id
"""
i = self.driver.find_element_by_id(input_id)
i.clear()
i.send_keys(num)
self.driver.find_element_by_id(btn_id).click()
def _read_table(self, num, stock_type):
"""获取第x页的股票列表信息
Arguments:
url {str} -- 网址
num {int} -- 页码序号
Returns:
pd.DataFrame -- 单页股票列表信息
"""
self._goto_page(num, 'ht_codeinput', 'pagebutton')
logger.info(f"{stock_type} 第{num}页")
return pd.read_html(self.driver.page_source, header=0)[0]
def _get_total_page_num(self, id_name='pagebutton', attr_name='page'):
"""获取分页总数
Arguments:
url {str} -- 网址
Keyworandom.random() Arguments:
id_name {str} -- 页数所在id名称 (default: {'pagebutton'})
attr_name {str} -- 页数对应的属性名称 (default: {'page'})
Returns:
integer -- 分页数量
"""
try:
e = self.driver.find_element_by_id(id_name)
return int(e.get_attribute(attr_name))
except NoSuchElementException:
return 0
except ElementNotInteractableException:
return 0
def _to_datetime(self, df, cols):
for col in cols:
df[col] = pd.to_datetime(
                df[col].values, errors='coerce', infer_datetime_format=True)
return df
def _get_item_data(self, suffix, tab_css, info, date_cols, item=None):
"""获取股票列表信息"""
url = self.host_url + suffix
        # Strict order of operations
        # 1. Open the page
self.driver.get(url)
        # 2. Wait for the response table to finish loading
table_css = '.table > tbody:nth-child(1) > tr > th'
locator = (By.CSS_SELECTOR, table_css)
self.wait.until(EC.visibility_of_all_elements_located(locator))
        # 3. Select the market segment
drop_css = 'div.single_select2 > button:nth-child(1)'
label_css_fmt = 'div.single_select2 > div:nth-child(2) > ul:nth-child(1) > li:nth-child({}) > label'
if item:
self.driver.find_element_by_css_selector(drop_css).click()
self.driver.implicitly_wait(0.1)
self.driver.find_element_by_css_selector(
label_css_fmt.format(item)).click()
        # 4. Switch to the target tab
if tab_css is not None:
self.driver.find_element_by_css_selector(tab_css).click()
else:
            # in this case, use the query button instead
btn_css = '#btnQuery'
self.driver.find_element_by_css_selector(btn_css).click()
        # 5. Get the total number of pages
total = self._get_total_page_num()
        # 6. Read page by page
        # If there is only one page, no loop is needed
if total in (0, 1):
return pd.read_html(self.driver.page_source, header=0)[0]
dfs = []
for i in range(1, total + 1):
df = self._read_table(i, info)
dfs.append(df)
        res = pd.concat(dfs)  # completion API: pandas.concat
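# Hedged sketch of the pagination pattern used above: each page is read into its
# own DataFrame and the pages are concatenated at the end (toy data, no Selenium).
import pandas as pd

def _example_concat_pages():
    pages = [pd.DataFrame({'code': ['600000'], 'name': ['A']}),
             pd.DataFrame({'code': ['600004'], 'name': ['B']})]
    return pd.concat(pages, ignore_index=True)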
import pandas as pd
from analysis.technical import Technical
from data import store as store
from utils import ui
_logger = ui.get_logger()
class Company:
def __init__(self, ticker: str, days: int, end: int = 0, lazy: bool = True, live: bool = False):
self.ticker = ticker.upper()
self.days = days
self.end = end
self.live = live if store.is_database_connected() else True
self.info = {}
self.history: pd.DataFrame = pd.DataFrame()
self.company = {}
self.price = 0.0
self.volatility = 0.0
self.ta = None
if not store.is_ticker(ticker):
raise ValueError(f'Invalid ticker {ticker}')
if days < 1:
raise ValueError('Invalid number of days')
if end < 0:
raise ValueError('Invalid "end" days')
if not lazy:
self._load_history()
self._load_company()
def __repr__(self):
return f'<Company ({self.ticker})>'
def __str__(self):
return f'{self.ticker}'
def get_last_price(self) -> float:
value = -1.0
if self.history.empty:
if self._load_history():
value = self.history.iloc[-1]['close']
else:
value = self.history.iloc[-1]['close']
return value
def get_high(self) -> pd.Series:
value = pd.Series(dtype=float)
if self.history.empty:
if self._load_history():
value = self.history['high']
else:
value = self.history['high']
return value
def get_low(self) -> pd.Series:
value = pd.Series(dtype=float)
if self.history.empty:
if self._load_history():
value = self.history['low']
else:
value = self.history['low']
return value
def get_close(self) -> pd.Series:
        value = pd.Series(dtype=float)  # completion API: pandas.Series
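# Hedged refactor sketch (not part of the original class): get_high, get_low and
# get_close above repeat one lazy-load pattern; it could be factored into a single
# helper like this mixin, which assumes the Company attributes self.history and
# self._load_history().
import pandas as pd

class LazyHistoryMixin:
    def _get_history_column(self, column: str) -> pd.Series:
        value = pd.Series(dtype=float)
        if self.history.empty:
            if self._load_history():
                value = self.history[column]
        else:
            value = self.history[column]
        return value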
from glob import glob
import sys
import pandas as pd
import numpy as np
from loguru import logger
from genomepy.annotation import query_mygene
from genomepy import Genome
__version__ = "0.1.0"
FMTS = {
"kallisto": (
["length", "eff_length", "est_counts", "tpm"],
["tpm", "est_counts", "eff_length"],
),
"salmon": (
["Length", "EffectiveLength", "TPM", "NumReads"],
["TPM", "NumReads", "EffectiveLength"],
),
}
KALLISTO_COLS = ["length", "eff_length", "est_counts", "tpm"]
SALMON_COLS = ["Length", "EffectiveLength", "TPM", "NumReads"]
class TxImport:
def __init__(self):
pass
def _parse_species(self, species):
if species is None:
logger.info("Using default tax_id 9606 (human)")
return 9606
try:
# tax_id
int(species)
logger.info(f"Using tax_id {species}")
return int(species)
except (ValueError, TypeError):
pass
try:
tax_id = species.tax_id
logger.info(f"Using tax_id {tax_id} from genome {species.name}")
return tax_id
except AttributeError:
pass
try:
g = Genome(species)
logger.info(f"Using tax_id {g.tax_id} from genome {species}")
return g.tax_id
except FileNotFoundError:
            logger.error(
                f"Provided species is not a tax_id and I cannot find a genome with the name {species}"
            )
logger.error("Don't know what to do now :(")
sys.exit()
def set_tx2gene(self, tx2gene=None, transcript_ids=None, species=None):
if tx2gene:
logger.info("Using provided tx2gene file")
result = pd.read_csv(tx2gene, index_col=0)
result.columns = ["symbol"]
else:
logger.info("Mapping transcripts to genes using mygene.info")
tax_id = self._parse_species(species)
if not isinstance(transcript_ids, pd.Series):
transcript_ids = pd.Series(transcript_ids)
transcripts = transcript_ids.str.replace(
r"\.[\d_]+$", "", regex=True
)
result = query_mygene(
transcripts, tax_id, "symbol", batch_size=10000
)
result = result[["symbol"]]
self.tx2gene = result
def import_files(
self, fnames, sample_names=None, tx2gene=None, species=None
):
"""Convert transcriptome-level quantification to gene_level quantification.
Parameters
----------
fnames : list
List of file names.
sample_names : list, optional
Use these sample names. If not specified, the name of the directories
is used as sample name.
tx2gene : str, optional
Filename of transcript to gene mapping. Should contain the transcript_id
in the first column and the gene_id in the second column. If not specified,
genomepy is used to query mygene to automatically determine the mapping.
species : str, optional
Species to use for mygene query. Human is set as default. Can be taxonomy
id (int or string), a genomepy genome name, or a Genome instance.
"""
if not sample_names:
sample_names = [fname.split("/")[-2] for fname in fnames]
dfs = [pd.read_table(fname, index_col=0) for fname in fnames]
filetype = None
for name, (cols, use_cols) in FMTS.items():
if dfs[0].shape[1] == len(cols) and list(dfs[0].columns) == cols:
filetype = name
TPM, COUNTS, EFFLENGTH = use_cols
break
if not filetype:
logger.error("Unknown filetype")
logger.error(dfs[0].columns)
sys.exit()
logger.info(f"Detected {filetype} files")
tpm = pd.concat([df[TPM] for df in dfs], axis=1)
tpm.columns = sample_names
self.set_tx2gene(tx2gene, tpm.index, species)
        counts = pd.concat([df[COUNTS] for df in dfs], axis=1)  # completion API: pandas.concat
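# Hedged sketch of the transcript-to-gene aggregation that typically follows the
# per-sample concat above (toy data; joining on a 'symbol' column mirrors the
# self.tx2gene table built in set_tx2gene).
def _example_tx2gene_aggregation():
    tpm = pd.DataFrame({'sample1': [1.0, 2.0, 3.0]},
                       index=['tx1', 'tx2', 'tx3'])
    tx2gene = pd.DataFrame({'symbol': ['geneA', 'geneA', 'geneB']},
                           index=['tx1', 'tx2', 'tx3'])
    return tpm.join(tx2gene).groupby('symbol').sum()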
import os
import pandas
import numpy as np
import nibabel as ni
import itertools
from glob import glob
import statsmodels.distributions.empirical_distribution as ed
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from scipy import stats
from scipy.io import savemat,loadmat
from nilearn import input_data, image
from matplotlib import mlab
from sklearn.utils import resample
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import MinMaxScaler
from statsmodels.sandbox.stats.multicomp import multipletests
#import matlab.engine
import sys
#eng = matlab.engine.start_matlab()
#eng.addpath('../',nargout=0)
def Extract_Values_from_Atlas(files_in, atlas,
mask = None, mask_threshold = 0,
blocking = 'one_at_a_time',
labels = [], sids = [],
output = None,):
'''
This function will extract mean values from a set of images for
each ROI from a given atlas. Returns a Subject x ROI pandas
DataFrame (and csv file if output argument is set to a path).
Use blocking argument according to memory capacity of your
computer vis-a-vis memory requirements of loading all images.
files_in: determines which images to extract values from. Input
can be any of the following:
-- a list of paths
-- a path to a directory containing ONLY files to extract from
-- a search string (with wild card) that would return all
desired images. For example, doing ls [files_in] in a terminal
would list all desired subjects
-- a 4D Nifti image
**NOTE** be aware of the order of file input, which relates to
other arguments
atlas: Path to an atlas, or a Nifti image or np.ndarry of desired
atlas. Or, if doing native space analysis, instead, supply a list
of paths to atlases that match each subject.
NOTE: In this case, The order of this list should be the same
order as subjects in files_in
mask: Path to a binary inclusive mask image. Script will set all
values to 0 for every image where mask voxels = 0. This process
is done before extraction. If doing a native space analysis,
instead, supply a list of paths to masks that match each subject
and each atlas.
mask_threshold: An integer that denotes the minimum acceptable
size (in voxels) of an ROI after masking. This is to prevent
tiny ROIs resulting from conservative masks that might have
spuriously high or low mean values due to the low amount of
information within.
blocking: loading all images to memory at once may not be possible
depending on your computer. Acceptable arguments are:
-- 'one_at_a_time': will extract values from each image
        independently. Recommended for machines with poor memory
capacity. Required for native space extraction.
-- 'all_at_once': loads all images into memory at once.
        Provides a slight speed-up for faster machines over
one_at_a_time, but is probably not faster than batching (see
below). Only recommended for smaller datasets.
** WARNING ** Not recommended on very large datasets. Will
crash computers with poor memory capacity.
-- any integer: determines the number of images to be read to
memory at once. Recommended for large datasets.
labels: a list of string labels that represent the names of the
ROIs from atlas.
NOTE: ROIs are read consecutively from lowest to highest, and
labels *must* match that order
Default argument [] will use "ROI_x" for each ROI, where X
        corresponds to the actual ROI integer label
sids: a list of subject IDs in the same order as files_in. Default
argument [] will list subjects with consecutive integers.
output: if you wish the resulting ROI values to be written to file,
provide a FULL path. Otherwise, leave as None (matrix will be
returned)
'''
if type(blocking) == str and blocking not in ['all_at_once','one_at_a_time']:
        raise IOError('blocking only accepts integers or arguments of "all_at_once" or "one_at_a_time"')
if type(atlas) == list:
if blocking != 'one_at_a_time':
print('WARNING: you have passed a list of atlases but blocking is not set to one_at_a_time')
print('Lists of atlases are for native space situations where each subject has their own atlas')
print('If you want to test multiple atlases, run the script multiple times with different atlases')
raise IOError('you have passed a list of atlases but blocking is not set to one_at_a_time')
if type(mask) != type(None):
if type(atlas) != type(mask):
raise IOError('for masking, list of masks must be passed that equals length of atlas list')
elif type(mask) == list:
if len(atlas) != len(mask):
                    raise IOError('list of atlases (n=%s) and masks (n=%s) are unequal'%(len(atlas),
                                                                                         len(mask)))
if type(atlas) != list:
if type(atlas) == str:
try:
atl = ni.load(atlas).get_data()
except:
raise IOError('could not find an atlas at the specified location: %s'%atlas)
elif type(atlas) == ni.nifti1.Nifti1Image:
atl = atlas.get_data()
elif type(atlas) == np.ndarray:
atl = atlas
else:
print('could not recognize atlas filetype. Please provide a path, a NiftiImage object, or an numpy ndarray')
raise IOError('atlas type not recognized')
if blocking == 'all_at_once':
i4d = load_data(files_in, return_images=True).get_data()
if i4d.shape[:-1] != atl.shape:
raise IOError('image dimensions do not match atlas dimensions')
if type(mask) != type(None):
print('masking...')
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
i4d = mask_image_data(i4d, mask_data)
if len(sids) == 0:
sids = range(i4d.shape[-1])
print('extracting values from atlas')
roi_vals = generate_matrix_from_atlas(i4d, atl, labels, sids)
else:
image_paths = load_data(files_in, return_images = False)
if blocking == 'one_at_a_time':
catch = []
for i,image_path in enumerate(image_paths):
if len(sids) > 0:
sid = [sids[i]]
else:
sid = [i]
print('working on subject %s'%sid[0])
img = ni.load(image_path).get_data()
try:
assert img.shape == atl.shape, 'fail'
except:
print('dimensions for subject %s (%s) image did not match atlas dimensions (%s)'%(sid,
img.shape,
atl.shape))
print('skipping subject %s'%sid[0])
continue
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
img = mask_image_data(img, mask_data)
f_mat = generate_matrix_from_atlas(img, atl, labels, sid)
catch.append(f_mat)
roi_vals = pandas.concat(catch)
elif type(blocking) == int:
block_size = blocking
if len(image_paths)%block_size == 0:
blocks = int(len(image_paths)/block_size)
remainder = False
else:
blocks = int((len(image_paths)/blocking) + 1)
remainder = True
catch = []
count = 0
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
for block in range(blocks):
if block == (blocks - 1) and remainder:
print('working on final batch of subjects')
sub_block = image_paths[count:]
else:
print('working on batch %s of %s subjects'%((block+1),block_size))
sub_block = image_paths[count:(count+block_size)]
i4d = load_data(sub_block, return_images = True).get_data()
if i4d.shape[:-1] != atl.shape:
                    raise IOError('image dimensions (%s) do not match atlas dimensions (%s)'%(i4d.shape[:-1],
                                                                                              atl.shape))
if type(mask) != type(None):
if len(mask_data.shape) == 4:
tmp_mask = mask_data[:,:,:,:block_size]
else:
tmp_mask = mask_data
i4d = mask_image_data(i4d, tmp_mask)
if block == (blocks - 1) and remainder:
if len(sids) == 0:
                        sids_in = range(count, count + i4d.shape[-1])
else:
sids_in = sids[count:]
else:
if len(sids) == 0:
sids_in = range(count,(count+block_size))
else:
sids_in = sids[count:(count+block_size)]
f_mat = generate_matrix_from_atlas(i4d, atl, labels, sids_in)
catch.append(f_mat)
count += block_size
roi_vals = pandas.concat(catch)
else:
image_paths = load_data(files_in, return_images = False)
if len(atlas) != len(image_paths):
raise IOError('number of images (%s) does not match number of atlases (%s)'%(len(image_paths),
len(atlas)))
catch = []
for i,image_path in enumerate(image_paths):
            if len(sids) > 0:
                sid = [sids[i]]
            else:
                sid = [i]
            print('working on subject %s'%sid[0])
img = ni.load(image_path).get_data()
atl = ni.load(atlas[i]).get_data()
if type(mask) != type(None):
mask_data = ni.load(mask[i]).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
img = mask_image_data(img,mask_data)
try:
assert img.shape == atl.shape, 'fail'
except:
print('dimensions for subject %s (%s) image did not match atlas dimensions (%s)'%(sid,
img.shape,
atl.shape
))
print('skipping subject %s'%sid)
continue
f_mat = generate_matrix_from_atlas(img, atl, labels, sid)
catch.append(f_mat)
roi_vals = pandas.concat(catch)
if output:
roi_vals.to_csv(output)
return roi_vals
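# Hedged usage sketch (all paths are placeholders): extract mean values for every
# atlas ROI from a directory of NIfTI images, 20 images per batch, and write the
# resulting Subject x ROI table to csv.
def _example_extract_values():
    return Extract_Values_from_Atlas(files_in='/path/to/subject_images/',
                                     atlas='/path/to/atlas.nii.gz',
                                     blocking=20,
                                     output='/path/to/roi_values.csv')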
def generate_matrix_from_atlas(files_in, atl, labels, sids):
if len(files_in.shape) == 3:
x,y,z = files_in.shape
files_in = files_in.reshape(x,y,z,1)
atl = atl.astype(int)
if max(np.unique(atl)) != (len(np.unique(atl)) -1):
atl = fix_atlas(atl)
if len(labels) > 0:
cols = labels
else:
cols = ['roi_%s'%x for x in np.unique(atl) if x != 0]
f_mat = pandas.DataFrame(index = sids,
columns = cols)
tot = np.bincount(atl.flat)
for sub in range(files_in.shape[-1]):
mtx = files_in[:,:,:,sub]
sums = np.bincount(atl.flat, weights = mtx.flat)
rois = (sums/tot)[1:]
f_mat.loc[f_mat.index[sub]] = rois
return f_mat
def load_data(files_in, return_images):
fail = False
if type(files_in) == str:
if os.path.isdir(files_in):
print('It seems you passed a directory')
search = os.path.join(files_in,'*')
flz = glob(search)
num_f = len(flz)
if num_f == 0:
raise IOError('specified directory did not contain any files')
else:
print('found %s images!'%num_f)
if return_images:
i4d = ni.concat_images(flz)
elif '*' in files_in:
print('It seems you passed a search string')
flz = glob(files_in)
num_f = len(flz)
if num_f == 0:
raise IOError('specified search string did not result in any files')
else:
print('found %s images'%num_f)
if return_images:
i4d = ni.concat_images(flz)
else:
fail = True
elif type(files_in) == list:
flz = files_in
print('processing %s subjects'%len(files_in))
if return_images:
i4d = ni.concat_images(files_in)
elif type(files_in) == ni.nifti1.Nifti1Image:
print('processing %s subjects'%files_in.shape[-1])
i4d = files_in
else:
fail = True
if fail:
print('files_in not recognized.',
'Please enter a search string, valid directory, list of paths, or a Nifti object')
raise ValueError('I do not recognize the files_in input.')
if return_images:
return i4d
else:
return flz
def mask_image_data(image_data, mask_data):
if len(image_data.shape) == 3:
if mask_data.shape != image_data.shape:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape))
image_data[mask_data==0] = 0
elif len(image_data.shape) == 4:
if len(mask_data.shape) == 4:
if mask_data.shape != image_data.shape:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape))
else:
masker = mask_data
else:
if mask_data.shape != image_data.shape[:3]:
raise ValueError('dimensions of mask (%s) and image (%s) do not match!'%(mask_data.shape,
image_data.shape[:3]))
masker = np.repeat(mask_data[:, :, :, np.newaxis], image_data.shape[-1], axis=3)
image_data[masker==0] = 0
return image_data
def mask_atlas(mask_data, atlas_data, mask_threshold):
if len(mask_data.shape) == 4:
dim4 = mask_data.shape[-1]
mask_data = mask_data[:,:,:,0]
tfm_4d = True
else:
tfm_4d = False
if max(np.unique(atlas_data)) != (len(np.unique(atlas_data)) -1):
atlas_data = fix_atlas(atlas_data)
mask_atlas = np.array(atlas_data, copy=True)
new_mask = np.array(mask_data, copy=True)
mask_atlas[mask_data == 0] = 0
counts = np.bincount(mask_atlas.astype(int).flat)
labs_to_mask = [x for x in range(len(counts)) if counts[x] < mask_threshold]
for label in labs_to_mask:
new_mask[atlas_data==label] = 0
if tfm_4d:
new_mask = np.repeat(new_mask[:, :, :, np.newaxis], dim4, axis=3)
return new_mask
def fix_atlas(atl):
new_atl = np.zeros_like(atl)
atl_map = dict(zip(np.unique(atl),
range(len(np.unique(atl)))
))
for u in np.unique(atl):
new_atl[atl == u] = atl_map[u]
return new_atl
def Convert_ROI_values_to_Probabilities(roi_matrix, norm_matrix = None,
models = None,
target_distribution = 'right',
outdir = False, fail_behavior = 'nan',
mixed_probability = False, mp_thresh = 0.05):
'''
Will take a Subject x ROI array of values and convert them to probabilities,
using ECDF (monomial distribution) or Gaussian Mixture models (binomial
distribution), with or without a reference sample with the same ROIs.
Returns a Subject x ROI matrix the same size as the input with probability
values. A report is also generated if an argument is passed for models. The
report details which model was selected for each ROI and notes any problems.
roi_matrix -- A subject x ROI matrix. can be pandas Dataframe or numpy array
norm_matrix -- A matrix with the same ROIs as roi_matrix. This sample will
be used to fit the distributions used to calculate the probabilities of
subject in roi_matrix. Norm_matrix and roi_matrix can have overlapping
subjects
if None (default), will use roi_matrix as norm_matrix
models -- a dict object pairing sklearn.gaussian models (values) with
labels describing the models (keys). If more than one model is passed,
for each ROI, model fit between all models will be evaluated and best model
(lowest BIC) will be selected for that ROI.
if None (default), probabilities will be calculated using ECDF.
        NOTE: Models with n_components=1 will calculate probabilities using
ECDF.
NOTE: This script does not currently support models with
n_distributions > 2
target_distribution -- Informs the script whether the target distribution is
expected to have lower values ('left', e.g. gray matter volume) or higher values
('right', e.g. tau-PET). The target distribution is the one for which
probabilities are generated. For example, passing a value of 'right' will give
the probability that a subject falls on the rightmost distribution of values for
a particular ROI.
outdir -- If the resulting probability matrix (and report) should be save to disk,
provide the path to an existing directory.
        WARNING: Will overwrite the already-existing output of this script if one already
exists in the passed directory
fail_behavior -- Occasionally, two-component models will find distributions that
are not consistent with the hypothesis presented in target_distribution.
This argument tells the script what to do in such situations:
'nan' will return NaNs for all ROIs that fail
        'values' will return probability values from one of the distributions (selected
arbitrarily)
mixed_probability -- Experimental setting. If set to True, after calculating
probabilities, for rois with n_components > 1 only, will set all values <
mp_thresh to 0. Remaining values will be put through ECDF. This will create less
of a binarized distribution for n_components > 1 ROIs.
mp_thresh -- Threshold setting for mixed_probability. Must be a float between 0
and 1. Decides the arbitrary probability of "tau positivity". Default is 0.05.
'''
if target_distribution not in ['left','right']:
        raise IOError('target_distribution must be set to "left" or "right"')
if fail_behavior not in ['nan', 'values']:
raise IOError('fail_behavior must be set to "nan" or "values"')
if type(roi_matrix) == pandas.core.frame.DataFrame:
roi_matrix = pandas.DataFrame(roi_matrix,copy=True)
if type(roi_matrix) != pandas.core.frame.DataFrame:
if type(roi_matrix) == np.ndarray:
roi_matrix = np.array(roi_matrix,copy=True)
roi_matrix = pandas.DataFrame(roi_matrix)
else:
raise IOError('roi_matrix type not recognized. Pass pandas DataFrame or np.ndarray')
if mixed_probability:
holdout_mtx = pandas.DataFrame(roi_matrix, copy=True)
if type(norm_matrix) != type(None):
if type(norm_matrix) == pandas.core.frame.DataFrame:
norm_matrix = pandas.DataFrame(norm_matrix,copy=True)
if type(norm_matrix) != pandas.core.frame.DataFrame:
if type(norm_matrix) == np.ndarray:
norm_matrix = np.array(norm_matrix,copy=True)
norm_matrix = pandas.DataFrame(norm_matrix)
else:
raise IOError('roi_matrix type not recognized. Pass pandas DataFrame or np.ndarray')
if norm_matrix.shape[-1] != roi_matrix.shape[-1]:
raise IOError('norm_matrix must have the same number of columns as roi_matrix')
        elif not all(norm_matrix.columns == roi_matrix.columns):
raise IOError('norm_matrix must have the same column labels as roi_matrix')
else:
norm_matrix = pandas.DataFrame(roi_matrix, copy=True)
results = pandas.DataFrame(index = roi_matrix.index, columns = roi_matrix.columns)
    if type(models) == type(None):
        final_report = None
for col in roi_matrix.columns:
if not all([x==0 for x in roi_matrix[col]]):
results.loc[:,col] = ecdf_tfm(roi_matrix[col], norm_matrix[col])
if target_distribution == 'left':
results.loc[:,col] = (1 - results.loc[:,col].values)
else:
results.loc[:,col] = [0 for x in range(len(roi_matrix[col]))]
elif type(models) == dict:
for label, model in models.items():
if not hasattr(model, 'predict_proba'):
raise AttributeError('Passed model %s requires the predict_proba attribute'%label)
if not hasattr(model, 'n_components'):
raise AttributeError('Passed model %s requires the n_components attribute'%label)
elif model.n_components > 2:
raise ValueError('Models with > 2 components currently not supported (%s, n=%s)'%(label,
model.n_components))
final_report = pandas.DataFrame(index = roi_matrix.columns,
columns = ['model','n_components','reversed',
'perc. positive','problem'])
for col in roi_matrix.columns:
if not all([x==0 for x in roi_matrix[col]]):
tfm, report_out = model_tfm(roi_matrix[col], norm_matrix[col], models,
target_distribution, fail_behavior)
results.loc[:,col] = tfm
final_report.loc[col,:] = pandas.DataFrame.from_dict(report_out,'index'
).T[final_report.columns].values
            else:
                results.loc[:,col] = [0 for x in range(len(roi_matrix[col]))]
                final_report.loc[col,:] = [np.nan for x in range(len(final_report.columns))]
        fails = len(final_report[final_report.problem!='False']['problem'].dropna())
        if fails > 0:
print('%s ROIs showed unexpected fitting behavior. See report...'%fails)
else:
raise ValueError('models must be a dict object or must be set to "ecdf". You passed a %s'%(type(models)))
if mixed_probability:
results = mixed_probability_transform(results, holdout_mtx, mp_thresh, final_report)
if type(final_report) == type(None):
if outdir:
results.to_csv(os.path.join(outdir, 'results.csv'))
return results
else:
if outdir:
results.to_csv(os.path.join(outdir, 'results.csv'))
final_report.to_csv(os.path.join(outdir, 'model_choice_report.csv'))
return results, final_report
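# Hedged usage sketch: the "models" argument of Convert_ROI_values_to_Probabilities
# expects a dict of labelled, unfitted mixture models, e.g. a one-component and a
# two-component Gaussian (GaussianMixture is imported at the top of this module).
def _example_probability_models():
    return {'one_component': GaussianMixture(n_components=1),
            'two_component': GaussianMixture(n_components=2)}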
def ecdf_tfm(target_col, norm_col):
return ed.ECDF(norm_col.values)(target_col.values)
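# Quick numeric illustration of ecdf_tfm: the returned probability is the fraction
# of reference values less than or equal to each target value (5 within 1..10
# gives 0.5).
def _example_ecdf_tfm():
    reference = pandas.Series(range(1, 11))
    target = pandas.Series([5])
    return ecdf_tfm(target, reference)  # array([0.5])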
def model_tfm(target_col, norm_col, models, target_distribution, fail_behavior):
report = {}
if len(models.keys()) > 1:
model, label = compare_models(models,norm_col)
else:
model = models[list(models.keys())[0]]
label = list(models.keys())[0]
report.update({'model': label})
report.update({'n_components': model.n_components})
if model.n_components == 1:
tfm = ecdf_tfm(target_col, norm_col)
report.update({'reversed': 'False'})
report.update({'perc. positive': np.nan})
report.update({'problem': 'False'})
else:
fitted = model.fit(norm_col.values.reshape(-1,1))
labs = fitted.predict(target_col.values.reshape(-1,1))
d0_mean = target_col.values[labs==0].mean()
d1_mean = target_col.values[labs==1].mean()
numb = len([x for x in labs if x == 1])/len(target_col)
if target_distribution == 'right':
if d0_mean > d1_mean and numb > 0.5:
report.update({'reversed': 'True'})
report.update({'perc. positive': 1-numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
elif d0_mean < d1_mean and numb < 0.5:
report.update({'reversed': 'False'})
report.update({'perc. positive': numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
report.update({'reversed': np.nan})
report.update({'perc. positive': np.nan})
report.update({'problem': 'mean of 0s = %s, mean of 1s = %s, perc of 1s = %s'%(
d0_mean, d1_mean, numb)})
if fail_behavior == 'nan':
tfm = [np.nan for x in range(len(target_col))]
elif fail_behavior == 'values':
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
if d0_mean < d1_mean and numb < 0.5:
report.update({'reversed': 'False'})
report.update({'perc. positive': numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
elif d0_mean > d1_mean and numb > 0.5:
report.update({'reversed': 'True'})
report.update({'perc. positive': 1-numb})
report.update({'problem': 'False'})
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,1]
else:
report.update({'reversed': np.nan})
report.update({'perc. positive': np.nan})
report.update({'problem': 'mean of 0s = %s, mean of 1s = %s, perc of 1s = %s'%(
d0_mean, d1_mean, numb)})
if fail_behavior == 'nan':
tfm = [np.nan for x in range(len(target_col))]
elif fail_behavior == 'values':
tfm = fitted.predict_proba(target_col.values.reshape(-1,1))[:,0]
return tfm, report
def compare_models(models, norm_col):
modz = []
labs = []
for lab, mod in models.items():
modz.append(mod)
labs.append(lab)
bix = []
for model in modz:
bic = model.fit(norm_col.values.reshape(-1,1)).bic(norm_col.values.reshape(-1,1))
bix.append(bic)
winner_id = np.argmin(bix)
winning_mod = modz[winner_id]
winning_label = labs[winner_id]
return winning_mod, winning_label
def mixed_probability_transform(p_matrix, original_matrix, mp_thresh, report):
for col in original_matrix.columns:
if report.loc[col,'n_components'] == 2:
            newcol = pandas.Series(
                [0 if p_matrix.loc[x, col] < mp_thresh else original_matrix.loc[x,col] for x in original_matrix.index],
                index = original_matrix.index)
if len(newcol[newcol>0]) > 0:
newcol[newcol>0] = ecdf_tfm(newcol[newcol>0], newcol[newcol>0])
p_matrix.loc[:,col] = newcol
return p_matrix
def Evaluate_Model(roi, models, bins=None):
'''
Given an array of values and a dictionary of models, this script
will generate a plot of the fitted distribution(s) from each
model (seperately) over the supplied data.
roi -- an array, series or list values
models -- a dict object of string label: (unfitted) sklearn.gaussian
model pairs
bins -- Number of bins for the histogram.
Passing None (default) sets bin to length(roi) / 2
'''
if type(roi) == np.ndarray or type(roi) == list:
roi = pandas.Series(roi)
plt.close()
if not bins:
bins = int(len(roi)/2)
for label,model in models.items():
mmod = model.fit(roi.values.reshape(-1,1))
if mmod.n_components == 2:
m1, m2 = mmod.means_
w1, w2 = mmod.weights_
c1, c2 = mmod.covariances_
            histdist = plt.hist(roi, bins, density=True)
plotgauss1 = lambda x: plt.plot(x,w1*stats.norm.pdf(x,m1,np.sqrt(c1))[0], linewidth=3, color="black", label="AB Negative")
plotgauss2 = lambda x: plt.plot(x,w2*stats.norm.pdf(x,m2,np.sqrt(c2))[0], linewidth=3, color="red", label="AB Positive")
plotgauss1(histdist[1])
plotgauss2(histdist[1])
elif mmod.n_components == 1:
m1 = mmod.means_
w1 = mmod.weights_
c1 = mmod.covariances_
            histdist = plt.hist(roi, bins, density=True)
plotgauss1 = lambda x: plt.plot(x,w1*stats.norm.pdf(x,m1,np.sqrt(c1))[0][0], linewidth=3, color="black")
plotgauss1(histdist[1])
plt.title(label, fontsize=18)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
plt.legend()
plt.show()
def Plot_Probabilites(prob_matrix, col_order = [], ind_order = [],
vmin=None, vmax=None, figsize=(), cmap=None, ax=None, path=None):
'''
Given the output matrix of Convert_ROI_values_to_Probabilities, will plot
a heatmap of all probability values sorted in such a manner to demonstrate
a progression of values.
'''
## NOTE TO SELF: ADD ARGUMENT FOR FIGSIZE AND THRESHOLDING HEATMAP
## ALSO ARGUMENT TO SORT BY DIFFERENT COLUMNS OR ROWS
if type(prob_matrix) == np.ndarray:
prob_matrix = pandas.DataFrame(prob_matrix)
if len(figsize) == 0:
figsize = (14,6)
elif len(figsize) > 2:
raise IOError('figsize must be a tuple with two values (x and y)')
good_cols = [x for x in prob_matrix.columns if not all([x==0 for x in prob_matrix[x]])]
prob_matrix = prob_matrix[good_cols]
plt.close()
if len(ind_order) == 0:
sorter = pandas.DataFrame(prob_matrix,copy=True)
sorter.loc[:,'mean'] = prob_matrix.mean(axis=1)
ind_order = sorter.sort_values('mean',axis=0,ascending=True).index
if len(col_order) == 0:
sorter2 = pandas.DataFrame(prob_matrix,copy=True)
sorter2.loc['mean'] = prob_matrix.mean(axis=0)
col_order = sorter2.sort_values('mean',axis=1,ascending=False).columns
fig, ax = plt.subplots(figsize=figsize)
forplot = prob_matrix.loc[ind_order, col_order]
    g = sns.heatmap(forplot, vmin=vmin, vmax=vmax, cmap=cmap, ax=ax)
plt.xlabel('Regions (highest - lowest p)', fontsize=24)
plt.ylabel('Subjects (lowest - highest p)', fontsize=24)
if path != None:
plt.yticks([])
plt.tight_layout()
plt.savefig(path)
return [ind_order,col_order]
def Evaluate_Probabilities(prob_matrix, to_test, alpha_threshold = 0.05, FDR=None, info='medium'):
'''
This script will quickly calculate significant (as defined by user)
associations between all columns in a DataFrame or matrix and variables
passed by the user. The script will try to guess the appropriate test to
run. Depending on inputs, the script will display the number of
significant columns, which columns are significant and the alpha values;
for each passed variable.
Multiple comparisons correction is supported.
prob_matrix -- a Subject x ROI matrix or DataFrame
to_test -- a dict object of where values are columns, arrays or lists with
the same length as prob_matrix, and keys are string labels IDing them.
alpha_threshold -- determines what is significant. NOTE: If an argument is
passed for FDR, alpha_threshold refers to Q, otherwise, it refers to p.
FDR -- If no argument is passed (default), no multiple comparisons
correction is performed. If the user desires multiple comparisons correction,
the user can select the type by entering any of the string arguments described
here: http://www.statsmodels.org/0.8.0/generated/statsmodels.sandbox.stats.multicomp.multipletests.html
info -- Determines how much information the script will display upon
completion.
light: script will only display the number of significant regions
        medium: script will also display which regions were significant
heavy: script will also display the alpha value for each region
'''
if info not in ['light','medium','heavy']:
print('WARNING: a value of %s was passed for argument "info"'%(info))
print('Script will proceed with minimal information displayed')
print('in the future, please pass one of the following:')
print('"light", "medium", "heavy"')
info = 'light'
if type(prob_matrix) == np.ndarray:
prob_matrix = pandas.DataFrame(prob_matrix)
good_cols = [x for x in prob_matrix.columns if not all([x==0 for x in prob_matrix[x]])]
prob_matrix = prob_matrix[good_cols]
for label, var in to_test.items():
if type(var) == np.ndarray or type(var) == list:
var = pandas.Series(var)
ps = []
n_vals = len(np.unique(var))
        if n_vals < 7:
            vals = np.unique(var)
            if n_vals == 2:
                print('for %s, using t-test...'%(label))
                for col in prob_matrix.columns:
                    p = stats.ttest_ind(prob_matrix.loc[var==vals[0]][col],
                                        prob_matrix.loc[var==vals[1]][col])[-1]
                    ps.append(p)
            else:
                print('for %s, using ANOVA...'%(label))
                for col in prob_matrix.columns:
                    groups = [prob_matrix.loc[var==val][col] for val in vals]
                    p = stats.f_oneway(*groups)[-1]
                    ps.append(p)
else:
print('for %s, using correlation...'%(label))
for col in prob_matrix.columns:
p = stats.pearsonr(prob_matrix[col],var)[-1]
ps.append(p)
if not FDR:
hits = [i for i in range(len(ps)) if ps[i] < alpha_threshold]
else:
correction = multipletests(ps,alpha_threshold,FDR)
hits = [i for i in range(len(ps)) if correction[0][i]]
print('=============%s============'%label)
print('for %s, %s regions were significant'%(label,len(hits)))
if info == 'medium':
print(prob_matrix.columns[hits])
if info == 'heavy':
if not FDR:
print([(prob_matrix.columns[i], ps[i]) for i in hits])
else:
print([(prob_matrix.columns[i], correction[1][i]) for i in hits])
print('\n\n')
return ps
def Prepare_Inputs_for_ESM(prob_matrices, ages, output_dir, file_name,
conn_matrices = [], conn_mat_names = [],
conn_out_names = [], epicenters_idx = [],
sub_ids = [], visit_labels = [], roi_labels = [],
figure = True, betas0 = None, deltas0 = None):
'''
This script will convert data into a matfile compatible with
running the ESM, and will print outputs to be entered into
ESM launcher script. The script will also adjust connectomes
to accomodate missing (masked) ROIs.
prob_matrices -- a dict object matching string labels to
probability matrices (pandas DataFrames). These will be
converted into a matlab structure. Columns with all 0s will be
removed automatically.
        NOTE: All prob_matrices should be the same shape, and a
matching number of non-zero columns. If they do not, run the
script separately for these matrices.
ages -- an array the same length of prob_matrices that contains
the age of each subject.
output_dir -- an existing directory where all outputs will be
written to
file_name -- the name of the output matfile. Do not include a
file extension
conn_matrices -- a list of paths to matfiles or csvs containing
    connectomes that match the atlas used to initially extract data.
if your probability matrix does not have columns with 0s
(because, for example, you used a mask), this argument can be
left unchanged. Otherwise, the script will chop up the
connectomes so they match the dimensions of the non-zero columns
in the probability matrices.
NOTE: passing this argument requires passing an argument for
conn_out_names
    conn_mat_names -- a list the same length as conn_matrices that
contains string labels
'''
if type(prob_matrices) != dict:
raise IOError('prob_matrices must be a dict object')
col_lens = []
for lab, df in prob_matrices.items():
good_cols = [y for y in df.columns if not all([x==0 for x in df[y]])]
col_lens.append(len(good_cols))
prob_matrices.update({lab: df[good_cols].values.T})
if not all([x == col_lens[0] for x in col_lens]):
raise IOError('all probability matrices entered must have the same # of non-zero columns')
goodcols = [y for y in range(len(df.columns)) if not all([x==0 for x in df[df.columns[y]]])]
if len(conn_matrices) > 0:
if not len(conn_matrices) == len(conn_out_names):
raise ValueError('equal length lists must be passed for conn_matrices and out_names')
for i,mtx in enumerate(conn_matrices):
if mtx[-3:] == 'csv':
connmat = pandas.read_csv(mtx)
x,y = connmat.shape
if x < y:
connmat = pandas.read_csv(mtx,header=None)
if all(connmat.loc[:,connmat.columns[0]] == range(connmat.shape[0])):
connmat = pandas.read_csv(mtx, index_col=0).values
x,y = connmat.shape
if x < y:
connmat = pandas.read_csv(mtx, index_col=0, header=None).values
else:
connmat = connmat.values
jnk = {}
elif mtx[-3:] == 'mat':
jnk = loadmat(mtx)
connmat = jnk[conn_mat_names[i]]
newmat = np.array([thing[goodcols] for thing in connmat[goodcols]])
prob_matrices.update({conn_out_names[i]: newmat})
#jnk[file_name] = newmat
#savemat(os.path.join(output_dir,conn_out_names[i]), jnk)
print('new connectivity matrix size: for %s'%conn_out_names[i],newmat.shape)
if figure:
plt.close()
try:
sns.heatmap(newmat)
plt.show()
except:
sns.heatmap(newmat.astype(float))
plt.show()
if type(ages) == np.ndarray or type(ages) == list:
ages = pandas.Series(ages)
if len(ages.dropna()) != len(df):
raise ValueError('length mismatch between "ages" and prob_matrices. Does "ages" have NaNs?')
prob_matrices.update({'ages': ages.values})
elif type(ages) == dict:
for key, ages_list in ages.items():
ages_list = pandas.Series(ages_list)
if len(ages_list.dropna()) != len(df):
raise ValueError('length mismatch between "ages" and prob_matrices. Does "ages" have NaNs?')
prob_matrices.update({key: ages_list.values})
if type(sub_ids) == list:
prob_matrices.update({'sub_ids': sub_ids})
if type(visit_labels) == list:
prob_matrices.update({'visit_labels': visit_labels})
elif type(visit_labels) == dict:
for key, visit_list in visit_labels.items():
visit_list = pandas.Series(visit_list)
if len(visit_list.dropna()) != len(df):
raise ValueError('length mismatch between "visits" and prob_matrices. Does "visits" have NaNs?')
prob_matrices.update({key: visit_list.values})
if type(epicenters_idx) == list:
prob_matrices.update({'epicenters_idx': epicenters_idx})
if type(roi_labels) == list:
prob_matrices.update({'roi_labels': roi_labels})
if type(betas0) == list:
prob_matrices.update({'betas': betas0})
if type(deltas0) == list:
prob_matrices.update({'deltas': deltas0})
fl_out = os.path.join(output_dir,file_name)
savemat(fl_out,prob_matrices)
print('ESM input written to',fl_out)
print('===inputs:===')
for x in prob_matrices.keys():
print(x)
if len(conn_matrices) > 0:
print('===connectivity matrices===')
for i in range(len(conn_matrices)):
print(os.path.join(output_dir,conn_out_names[i]) + '.mat')
def Evaluate_ESM_Results(results, sids, save=True,
labels = None, lit = False, plot = True):
'''
This script will load the matfile outputted from the ESM, will
display the main model results (r2, RMSE and "eval"), the
chosen epicenter(s) and will return the model outputs as a
pandas DataFrame if desired.
results -- a .mat file created using the ESM script
sids -- a list of subject IDs that matches the subjects input to
the ESM
save -- if True, will return a pandas DataFrame with model
results
labels -- ROI labels that match those from the ESM input matrix.
lit -- If only one epicenter was sent (for example, for
hypothesis testing), set this to True. Otherwise, leave as False.
plot -- If True, function will plot several charts to evaluate
ESM results on an ROI and subject level.
'''
mat = loadmat(results)
if not lit:
res = pandas.DataFrame(index = sids)
for i in range(len(mat['ref_pattern'][0])):
# Model fits
sid = sids[i]
r,p = stats.pearsonr(mat['ref_pattern'][:,i], mat['Final_solutions'][:,i])
res.loc[sid,'model_r'] = r
res.loc[sid,'model_r2'] = r**2
res.loc[:, 'model_RMSE'] = mat['Final_RMSEs'].flatten()
res.loc[:, 'model_eval'] = mat['Final_CORRs'].flatten()
if save:
# params
res.loc[:, 'beta'] = mat['Final_parameters'][0,:].flatten()
res.loc[:, 'delta'] = mat['Final_parameters'][1,:].flatten()
res.loc[:, 'sigma'] = mat['Final_parameters'][2,:].flatten()
# other
res.loc[:, 'ref_age'] = mat['AGEs'].flatten()
res.loc[:, 'times'] = mat['Final_times'].flatten()
res.loc[:, 'Onset_age'] = mat['ONSETS_est'].flatten()
print('average r2 = ', res.model_r2.mean())
print('average RMSE =', res.model_RMSE.mean())
print('average eval =', res.model_eval.mean())
if type(labels) != type(None):
if type(labels) == np.ndarray or type(labels) == list:
labels = pandas.Series(labels)
            print('model identified the following epicenters')
for l in mat['models'][0,0][0][0]:
print(labels.loc[labels.index[l-1]])
if plot:
plot_out = Plot_ESM_results(mat, labels, sids, lit)
if save:
if plot:
res = {'model_output': res, 'eval_output': plot_out}
return res
else:
res = pandas.DataFrame(index = sids)
for i in range(len(mat['ref_pattern'][0])):
# Model fits
sid = sids[i]
r,p = stats.pearsonr(mat['ref_pattern'][:,i], mat['model_solutions0'][:,i])
res.loc[sid,'model_r'] = r
res.loc[sid,'model_r2'] = r**2
res.loc[:, 'model_RMSE'] = mat['model_RMSEs0'].flatten()
res.loc[:, 'model_eval'] = mat['model_CORRs0'].flatten()
if save:
# params
res.loc[:, 'beta'] = mat['model_parameters0'][0,:].flatten()
res.loc[:, 'delta'] = mat['model_parameters0'][1,:].flatten()
res.loc[:, 'sigma'] = mat['model_parameters0'][2,:].flatten()
# other
res.loc[:, 'ref_age'] = mat['AGEs'].flatten()
res.loc[:, 'times'] = mat['model_times0'].flatten()
res.loc[:, 'Onset_age'] = mat['ONSETS_est'].flatten()
print('average r2 = ', res.model_r2.mean())
print('average RMSE =', res.model_RMSE.mean())
print('average eval =', res.model_eval.mean())
#if type(labels) != type(None):
# print('model identfied the following epicenters')
# for l in mat['models'][0,0][0][0]:
# print(labels.iloc[l-1]['label'])
if plot:
plot_out = Plot_ESM_results(mat, labels, sids, lit)
if save:
if plot:
res = {'model_output': res, 'eval_output': plot_out}
return res
def Plot_ESM_results(mat, labels, subids, lit):
if not lit:
mat.update({'model_solutions0': mat['Final_solutions']})
sheets = {}
# regional accuracy across subjects
plt.close()
sns.regplot(mat['ref_pattern'].mean(1), mat['model_solutions0'].mean(1))
plt.xlabel('Avg ROI Amyloid Probability Across Subjects')
plt.ylabel('Avg Predicted ROI Amyloid Probability Across Subjects')
plt.title('Regional accuracy across subjects')
plt.show()
r,p = stats.pearsonr(mat['ref_pattern'].mean(1), mat['model_solutions0'].mean(1))
    print('r2 = ', r**2, '\n')
fp = pandas.DataFrame(pandas.concat([pandas.Series(mat['ref_pattern'].mean(1)),
pandas.Series(mat['model_solutions0'].mean(1))
], axis = 1))
fp.columns = ['reference','predicted']
if type(labels) != type(None):
fp.loc[:,'labels'] = labels
sheets.update({'regional accuracy': fp})
# Average ROI values across subject
r2s = []
for i in range(mat['ref_pattern'].shape[0]):
r = stats.pearsonr(mat['ref_pattern'][i,:],mat['model_solutions0'][i,:])[0]
r2s.append(r**2)
if type(labels) == type(None):
labels = range(mat['ref_pattern'].shape[0])
roi_test = pandas.concat([pandas.Series(labels).astype(str),pandas.Series(r2s)],
axis=1)
roi_test.columns = ['label','r2']
plt.close()
g = sns.catplot(x='label', y='r2',data=roi_test, ci=None,
order = roi_test.sort_values('r2',ascending=False)['label'])
g.set_xticklabels(rotation=90)
g.fig.set_size_inches((14,6))
plt.title('ROI values across subjects')
plt.show()
print(roi_test.r2.mean())
sheets.update({'ROI_acc': roi_test})
# average subjects across ROIs
r2s = []
for i in range(mat['ref_pattern'].shape[-1]):
r2s.append(stats.pearsonr(mat['ref_pattern'][:,i], mat['model_solutions0'][:,i]
)[0]**2)
sub_test = pandas.concat([pandas.Series(subids).astype(str), pandas.Series(r2s)],
axis=1)
sub_test.columns = ['subid','model_r2']
plt.close()
#sns.set_context('notebook')
#g = sns.factorplot(x='subid', y='model_r2', data=sub_test, ci=None,
#order = sub_test.sort_values('model_r2',ascending=False)['subid'])
#g.set_xticklabels(rotation=90)
#g.fig.set_size_inches((14,6))
#plt.show()
#print(sub_test.model_r2.mean())
return sheets
def Plot_Individual(matrix, index, style='ROI', label = None):
'''
Plot a single ROI across subjects, or a single subject across
ROIs.
matrix -- a dict object representing ESM results
index -- the index of the ROI or subject to plot
style -- set to 'ROI' or 'subject'
label -- Title to put over the plot
'''
if style not in ['ROI', 'subject']:
raise IOError('style argument must be set to "ROI" or "subject"')
if 'Final_solutions' not in matrix.keys():
matrix.update({'Final_solutions': matrix['model_solutions0']})
if style == 'ROI':
x = matrix['ref_pattern'][index,:]
y = matrix['Final_solutions'][index,:]
else: # subject
x = matrix['ref_pattern'][:,index]
y = matrix['Final_solutions'][:,index]
plt.close()
sns.regplot(x,y)
plt.xlabel('Observed')
plt.ylabel('Predicted')
if label:
plt.title(label)
plt.show()
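# A minimal usage sketch for Plot_Individual. The dict below is synthetic and
# only mimics the keys the function expects ('ref_pattern' plus either
# 'Final_solutions' or 'model_solutions0'); real input would come from loadmat.
def _demo_plot_individual():
    import numpy as np
    demo_mat = {'ref_pattern': np.random.rand(20, 5),          # 20 ROIs x 5 subjects
                'model_solutions0': np.random.rand(20, 5)}
    Plot_Individual(demo_mat, index=0, style='ROI', label='ROI 0: observed vs predicted')
    Plot_Individual(demo_mat, index=2, style='subject', label='Subject 2: observed vs predicted')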
def Prepare_PET_Data(files_in, atlases, ref = None, msk = None, dimension_reduction = False,
ECDF_in = None, output_type = 'py', out_dir = './', out_name = 'PET_data',
save_matrix = False, save_ECDF = False, save_images = False, ref_index = [],
mx_model = 0, orig_atlas = None, esm2014method_py = False, orig_prob_method_matlab = False):
''' This is a function that will take several PET images and an atlas and will
return a subject X region matrix. If specified, the function will also calculate
probabilities (via ECDF) either voxelwise, or using a specified reference region
files_in = input can either be
- a path to a directory full of (only) nifti images OR
- a "search string" using wildcards
- a list of subject paths OR
- a subject X image matrix
    atlases = multiple options:
- a path to a labeled regional atlas in the same space as the PET data
- if analysis was done in native space, a path to a list of labeled regional atlases
ref = multiple options:
- If None, no probabilities will be calculated, and script will simply extract
regional PET data using the atlas.
- If a path to a reference region mask, will calculate voxelwise probabilities
based on values within the reference region. Mask must be in the same space as
as PET data and atlas
- List of paths to reference region masks in native space. Voxelwise probabilities
will be calculated based on values within the reference region.
- If a list of integers, will combine these atlas labels with these integers to
make reference region
- if 'voxelwise', voxelwise (or atom-wise from dimension reduction) probabilities
        will be estimated. In other words, each voxel or atom will serve as its own
reference.
msk = multiple options:
- A path to a binary mask file in the same space as PET data and atlas. If None,
mask will be computed as a binary mask of the atlas.
** PLEASE NOTE: The mask will be used to mask the reference region! **
dimension_reduction = whether or not to first reduce dimensions of data using
hierarchical clustering. This results in an initial step that will be very slow, but
    may result in an overall speedup for the script, but perhaps only if ref is set
to 'voxelwise'.
- If None, do not perform dimension reduction
- If integer, the number of atoms (clusters) to reduce to
ECDF_in = If the user wishes to apply an existing ECDF to the PET data instead of
    generating one de novo, that can be done here. This is crucial if the user wishes to
use multiple datasets. Think of it like scaling in machine learning.
- If None, will generate ECDF de novo.
- If np.array, will use this array to generate the ECDF.
- If statsmodel ECDF object, will use this as ECDF
- If a path, will use the
output_type = type of file to save final subject x region matrix into. multiple options:
-- 'py' will save matrix into a csv
-- 'mat' will save matrix into a matfile
out_dir = location to save output files. Defaults to current directory
out_name = the prefix for all output files
save_matrix = Whether to save or return subject x image matrix. Useful if running multiple
times, as this matrix can be set as files_in, bypassing the costly data import
-- if 'return', will return subject x image matrix to python environment
-- if 'save', will write subject x image matrix to file.
-- if None, matrix will not be stored
save_ECDF = whether to save the ECDF used to create the probabilities. This is crucial if
using multiple datasets. The resulting output can be used as input for the ECDF argument.
    -- if 'return', will return np.array to python environment
-- if 'save', will write array to file
-- if None, array will not be stored
'''
# Check input arguments
print('initiating...')
if output_type != 'py' and output_type != 'mat':
raise IOError('output_type must be set to py or mat')
# Initialize variables
# Load data
print('loading data...')
i4d = load_data(files_in, return_images=False) # load PET data
if save_matrix == 'save':
otpt = os.path.join(out_dir,'%s_4d_data'%out_name)
print('saving 4d subject x scan to nifti image: \n',otpt)
i4d.to_filename(otpt)
# load atlas
if type(atlases) != list:
if type(atlases) == str:
try:
atlas = ni.load(atlases).get_data().astype(int)
except:
            raise IOError('could not find an atlas at the specified location: %s'%atlases)
if orig_atlas == True:
orig_atlas = np.array(atlas, copy=True)
if atlas.shape != i4d.shape[:3]:
raise ValueError('atlas dimensions do not match PET data dimensions')
# load reference region
if type(ref) == str and ref != 'voxelwise':
print('looking for reference image...')
        if not os.path.exists(ref):
raise IOError('Please enter a valid path for ref, or select a different option for this argument')
else:
ref_msk = ni.load(ref).get_data()
if ref_msk.shape != i4d.shape[:3]:
raise ValueError('ref region image dimensions do not match PET data dimensions')
elif type(ref) == list:
ref_msk = np.zeros_like(atlas)
for i in ref:
ref_msk[atlas == i] = 1
else:
ref_msk = None
# Mask data
print('masking data...')
if msk == None:
img_mask = np.array(atlas,copy=True)
img_mask[img_mask<1] = 0
img_mask[img_mask>0] = 1
else:
img_mask = ni.load(msk).get_data()
atlas[img_mask < 1] = 0
if type(ref_msk) != type(None):
ref_msk[img_mask < 1] = 0
mask_tfm = input_data.NiftiMasker(ni.Nifti1Image(img_mask,i4d.affine))
mi4d = mask_tfm.fit_transform(i4d)
# dimension reduction (IN BETA!)
if dimension_reduction:
print('reducing dimensions...')
shape = img_mask.shape
connectivity = grid_to_graph(n_x=shape[0], n_y=shape[1],
n_z=shape[2], mask=img_mask)
# main ECDF calculation (or mixture model calc)
skip = False
if ref != 'voxelwise':
if type(ECDF_in) != type(None):
print('generating ECDF...')
print('using user-supplied data...')
if type(ECDF_in) == ed.ECDF:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = 'not generated'
elif type(ECDF_in) == np.ndarray:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = ECDF_in
# elif # add later an option for importing an external object
else:
try:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
print('Could not understand ECDF input, but ECDF successful')
input_distribution = 'not generated'
except:
raise IOError(
'Invalid argument for ECDF in. Please enter an ndarray, an ECDF object, or a valid path')
else:
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk,i4d.affine))
refz = ref_tfm.fit_transform(i4d)
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
else:
print('generating voxelwise ECDF...')
mi4d_ecdf, ECDF_array = ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=mx_model)
input_distribution = 'not generated'
if not skip:
# if save_ECDF:
# create an array and somehow write it to a file
# transform back to image-space
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
#if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat = generate_matrix_from_atlas_old(f_images, orig_atlas)
else:
if len(atlases) != len(files_in):
raise IOError('number of images (%s) does not match number of atlases (%s)'%(len(files_in),
len(atlases)))
type_ref_int = True
if isinstance(ref, list):
if all(isinstance(x, int) for x in ref):
print("Passing in a list of integers to specify the reference region.")
elif all(isinstance(x, str) for x in ref):
if len(ref) == len(files_in):
type_ref_int = False
print("Passing in a list of paths to reference region masks in native space")
else:
raise IOError(
                    'number of images (%s) does not match number of ref region masks (%s)' % (
                        len(files_in), len(ref)))
catch = []
for i in range(0, len(files_in)):
print(files_in[i])
i4d = ni.load(files_in[i])
atlas = ni.load(atlases[i]).get_data()
if len(atlas.shape) == 4:
atlas = np.reshape(atlas, atlas.shape[:3])
if atlas.shape != i4d.shape[:3]:
raise ValueError('atlas dimensions do not match PET data dimensions')
if type_ref_int:
ref_msk = np.zeros_like(atlas)
                for lab in ref:   # avoid shadowing the outer loop index i
                    ref_msk[atlas == lab] = 1
else:
ref_msk = ni.load(ref[i]).get_data()
if len(ref_msk.shape) == 4:
ref_msk = np.reshape(ref_msk, ref_msk.shape[:3])
if ref_msk.shape != i4d.shape[:3]:
raise ValueError('ref region image dimensions do not match PET data dimensions')
if type(ref_msk) != type(None):
ref_msk[ref_msk < 1] = 0
ref_msk[ref_msk > 0] = 1
# Mask data
if msk == None:
img_mask = np.array(atlas, copy=True)
img_mask[img_mask < 1] = 0
img_mask[img_mask > 0] = 1
else:
img_mask = ni.load(msk).get_data()
atlas[img_mask < 1] = 0
mask_tfm = input_data.NiftiMasker(ni.Nifti1Image(img_mask, i4d.affine))
mi4d = mask_tfm.fit_transform(i4d)
#Calculate voxelwise ECDF with respect to ref region in native space
skip = False
if type(ECDF_in) == type(None):
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk, i4d.affine))
refz = ref_tfm.fit_transform(i4d)
if esm2014method_py:
mi4d_ecdf, ecref = ecdf_voxelwise_bootstrapped_maxvalues_refregion(mi4d, refz)
elif orig_prob_method_matlab:
mi4d_ecdf = ecdf_voxelwise_bootstrapped_yasser2016(mi4d, refz)
else:
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
if not skip:
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
# if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat_single = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat_single = generate_matrix_from_atlas_old(f_images, orig_atlas)
#f_mat_single = ecdf_main(mi4d=mi4d, i4d=i4d, atlas=atlas, ref=ref, mask_tfm = mask_tfm)
catch.append(f_mat_single)
f_mat = pandas.concat(catch)
print('preparing outputs')
output = {}
if output_type == 'py':
f_mat.to_csv(os.path.join(out_dir, '%s_roi_data.csv' % out_name), index=False)
output.update({'roi_matrix': f_mat})
else:
output.update({'roi_matrix': f_mat.values})
output.update({'roi_matrix_columns': f_mat.columns})
if save_matrix == 'return':
output.update({'4d_image_matrix': i4d})
if save_ECDF == 'return':
if output_type == 'py':
output.update({'ECDF_function': ECDF_array})
else:
output.update({'input_distribution': input_distribution})
return output
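# A hypothetical call to Prepare_PET_Data. All paths below are placeholders and
# the reference region is given as a list of made-up atlas labels; only
# arguments documented in the docstring above are used.
def _demo_prepare_pet_data():
    return Prepare_PET_Data(files_in='/path/to/pet_scans/*.nii.gz',   # placeholder search string
                            atlases='/path/to/atlas.nii.gz',          # placeholder atlas path
                            ref=[7, 8, 46, 47],                       # placeholder reference-region labels
                            output_type='py',
                            out_dir='./',
                            out_name='demo_PET_data')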
def ecdf_main(mi4d, i4d, atlas, ref, mask_tfm, mx_model=0, ref_msk=None, save_ECDF=False, ECDF_in=None, ref_index=[],
skip=False, orig_atlas=None):
if ref != 'voxelwise':
if type(ECDF_in) != type(None):
print('generating ECDF...')
print('using user-supplied data...')
if type(ECDF_in) == ed.ECDF:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = 'not generated'
elif type(ECDF_in) == np.ndarray:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
input_distribution = ECDF_in
# elif # add later an option for importing an external object
else:
try:
mi4d_ecdf, ecref = ecdf_simple(mi4d, ECDF_in, mx=mx_model)
print('Could not understand ECDF input, but ECDF successful')
input_distribution = 'not generated'
except:
raise IOError(
'Invalid argument for ECDF in. Please enter an ndarray, an ECDF object, or a valid path')
else:
if type(ref_msk) != type(None):
print('generating ECDF...')
ref_tfm = input_data.NiftiMasker(ni.Nifti1Image(ref_msk,i4d.affine))
refz = ref_tfm.fit_transform(i4d)
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz, mx=mx_model)
input_distribution = refz.flat
else:
print('skipping ECDF...')
skip = True
else:
print('generating voxelwise ECDF...')
mi4d_ecdf, ECDF_array = ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=mx_model)
input_distribution = 'not generated'
if not skip:
# if save_ECDF:
# create an array and somehow write it to a file
# transform back to image-space
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d_ecdf)
else:
#if type(ECDF):
print('transforming back into image space')
f_images = mask_tfm.inverse_transform(mi4d)
# generate output matrix
print('generating final subject x region matrix')
if type(orig_atlas) == type(None):
f_mat = generate_matrix_from_atlas_old(f_images, atlas)
else:
f_mat = generate_matrix_from_atlas_old(f_images, orig_atlas)
return f_mat
def load_data_old(files_in):
fail = False
if type(files_in) == str:
if os.path.isdir(files_in):
print('It seems you passed a directory')
search = os.path.join(files_in,'*')
num_f = len(glob(search))
if num_f == 0:
raise IOError('specified directory did not contain any files')
else:
print('found %s images!'%num_f)
i4d = image.load_img(search)
elif '*' in files_in:
print('It seems you passed a search string')
num_f = len(glob(files_in))
if num_f == 0:
raise IOError('specified search string did not result in any files')
else:
print('found %s images'%num_f)
i4d = image.load_img(files_in)
else:
fail = True
elif type(files_in) == list:
print('processing %s subjects'%len(files_in))
i4d = ni.concat_images(files_in)
elif type(files_in) == ni.nifti1.Nifti1Image:
print('processing %s subjects'%files_in.shape[-1])
i4d = files_in
else:
fail = True
if fail:
print('files_in not recognized.',
'Please enter a search string, valid directory, list of subjects, or matrix')
raise ValueError('I do not recognize the files_in input.')
return i4d
def dim_reduction(mi4d, connectivity, dimension_reduction):
    ward = FeatureAgglomeration(n_clusters=dimension_reduction//2,
connectivity=connectivity, linkage='ward', memory='nilearn_cache')
ward.fit(mi4d)
ward = FeatureAgglomeration(n_clusters=dimension_reduction,
connectivity=connectivity, linkage='ward', memory='nilearn_cache')
ward.fit(mi4d)
mi4d = ward.transform(mi4d)
return mi4d
def ecdf_voxelwise_bootstrapped_yasser2016(mi4d, refz):
mi4d_ecdf = eng.voxelwise_pet_prob_yasser2016(matlab.double([list(mi4d[0])]), matlab.double([list(refz[0])]))
return mi4d_ecdf
def ecdf_voxelwise_bootstrapped_maxvalues_refregion(mi4d, refz):
refz_max_values = []
for i in range(0, 40000):
resampled_refz = resample(refz.flatten(), n_samples=500, replace=True)
percentile_value = np.percentile(resampled_refz, 95)
refz_max_values.append(percentile_value)
refz_max_array = np.array(refz_max_values)
refz_max_array = np.reshape(refz_max_array, (1, len(refz_max_array)))
mi4d_ecdf, ecref = ecdf_simple(mi4d, refz_max_array, mx=0)
return mi4d_ecdf, ecref
def ecdf_simple(mi4d, refz, mx=0):
if type(refz) == ed.ECDF:
ecref = refz
else:
if len(refz.shape) > 1:
ecref = ed.ECDF(refz.flat)
else:
ecref = ed.ECDF(refz)
print('transforming images...')
if mx == 0:
mi4d_ecdf = ecref(mi4d.flat).reshape(mi4d.shape[0],mi4d.shape[1])
else:
print('are you sure it makes sense to use a mixture model on reference region?')
mod = GaussianMixture(n_components=mx).fit(ecref)
mi4d_ecdf = mod.predict_proba(mi4d.flat)[:,-1].reshape(mi4d.shape[0],mi4d.shape[1])
return mi4d_ecdf, ecref
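# A small worked example of the ECDF step above (mx=0 path), assuming ed.ECDF is
# the standard statsmodels empirical CDF: each value becomes the fraction of
# reference-region values less than or equal to it. Numbers are synthetic.
def _demo_ecdf_simple():
    import numpy as np
    refz = np.array([1.0, 1.2, 1.4, 1.6, 1.8])   # toy reference-region values
    mi4d = np.array([[1.1, 1.5, 2.0]])           # one subject x three voxels
    probs, ecref = ecdf_simple(mi4d, refz, mx=0)
    # expected probabilities: [[0.2, 0.6, 1.0]] (1, 3 and 5 of the 5 reference
    # values fall at or below 1.1, 1.5 and 2.0 respectively)
    return probs, ecref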
def ecdf_voxelwise(mi4d, ref_index, save_ECDF, mx=0):
X,y = mi4d.shape
if mx != 0:
mmod = GaussianMixture(n_components=mx)
if len(ref_index) == 0:
if not save_ECDF:
if mx == 0:
mi4d_ecdf = np.array([ed.ECDF(mi4d[:,x])(mi4d[:,x]) for x in range(y)]).transpose()
else:
mi4d_ecdf = np.array([mmod.fit(mi4d[:,x].reshape(-1,1)).predict_proba(mi4d[:,x].reshape(-1,1)
)[:,-1] for x in range(y)]).transpose()
ECDF_array = None
else:
if mx == 0:
ECDF_array = np.array([ed.ECDF(mi4d[:,x]) for x in range(y)]).transpose()
print('transforming data...')
mi4d_ecdf = np.array([ECDF_array[x](mi4d[:,x]) for x in range(y)]
).transpose()
else:
raise IOError('at this stage, cant save mixture model info....sorry...')
else:
if mx == 0:
# if you don't want to include subjects used for reference, un-hash this, hash
# the next line, and fix the "transpose" line so that the data gets back into the matrix properly
#good_ind = [x for x in list(range(X)) if x not in ref_index]
good_ind = range(X)
if not save_ECDF:
mi4d_ecdf = np.array([ed.ECDF(mi4d[ref_index,x])(mi4d[good_ind,x]) for x in range(y)]
).transpose()
ECDF_array = None
else:
ECDF_array = [ed.ECDF(mi4d[ref_index,x]) for x in range(y)]
print('transforming data...')
mi4d_ecdf = np.array([ECDF_array[x](mi4d[good_ind,x]) for x in range(y)]
).transpose()
else:
### COMING SOON!
raise IOError('have not yet set up implementation for mixture models and reg groups')
return mi4d_ecdf, ECDF_array
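# A compact illustration of the voxelwise path above (mx=0, empty ref_index):
# every column (voxel) is ranked against itself across subjects, so each voxel
# acts as its own reference. Synthetic numbers, same ed.ECDF assumption as above.
def _demo_ecdf_voxelwise():
    import numpy as np
    mi4d = np.array([[1.0, 10.0],
                     [2.0, 30.0],
                     [3.0, 20.0]])               # 3 subjects x 2 voxels
    probs, _ = ecdf_voxelwise(mi4d, ref_index=[], save_ECDF=False, mx=0)
    # expected column 0: [1/3, 2/3, 1.0]; expected column 1: [1/3, 1.0, 2/3]
    return probs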
def generate_matrix_from_atlas_old(files_in, atlas):
files_in = files_in.get_data()
atlas = atlas.astype(int)
f_mat = pandas.DataFrame(index = range(files_in.shape[-1]),
columns = ['roi_%s'%x for x in np.unique(atlas) if x != 0])
tot = np.bincount(atlas.flat)
sorted_cols = []
for sub in range(files_in.shape[-1]):
mtx = files_in[:,:,:,sub]
sums = np.bincount(atlas.flat, weights = mtx.flat)
rois = (sums/tot)[1:]
for i in range(0, len(rois)):
col="roi_" + str(i+1)
sorted_cols.append(col)
if col in list(f_mat.columns):
f_mat.loc[f_mat.index[sub], col] = rois[i]
else:
f_mat.loc[f_mat.index[sub], col] = 0
f_mat = f_mat[sorted_cols]
return f_mat
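# A small worked example of the np.bincount trick used above: weighted counts
# divided by label counts give the mean image value within each atlas label,
# and the [1:] slice drops the background label 0.
def _demo_regional_means():
    import numpy as np
    atlas = np.array([0, 1, 1, 2, 2, 2])           # label 0 = background
    img = np.array([9.0, 2.0, 4.0, 3.0, 6.0, 9.0])
    tot = np.bincount(atlas)                       # voxels per label: [1, 2, 3]
    sums = np.bincount(atlas, weights=img)         # summed values:    [9., 6., 18.]
    rois = (sums / tot)[1:]                        # regional means:   [3.0, 6.0]
    assert np.allclose(rois, [3.0, 6.0])
    return rois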
def W_Transform(roi_matrix, covariates, norm_index = [],
columns = [], verbose = False):
'''
Depending on inputs, this function will either regress selected
variables out of an roi_matrix, or will perform a W-transform on an
roi_matrix.
W-transform is represented as such:
        (A - Pc) / SDrc
    Where A is the actual value of the roi; Pc is the predicted value of the
    roi *based on the covariates of the norm sample*; SDrc = standard
    deviation of the residuals *of the norm sample*
roi_matrix = a subjects x ROI array
covariates = a subject x covariates array
norm_index = index pointing exclusively to subjects to be used for
normalization. If norm index is passed, W-transformation will be
performed using these subjects as the norm_sample (see equation
above). If no norm_index is passed, covariates will simply be
regressed out of all ROIs.
    columns = the columns to use from the covariate matrix. If none,
    all columns of the covariate matrix will be used.
verbose = If True, will notify upon the completion of each ROI
transformation.
'''
if type(roi_matrix) != pandas.core.frame.DataFrame:
raise IOError('roi_matrix must be a subjects x ROIs pandas DataFrame')
if type(covariates) != pandas.core.frame.DataFrame:
raise IOError('covariates must be a subjects x covariates pandas DataFrame')
covariates = clean_time(covariates)
roi_matrix = clean_time(roi_matrix)
if len(columns) > 0:
covs = pandas.DataFrame(covariates[columns], copy=True)
else:
covs = pandas.DataFrame(covariates, copy=True)
if covs.shape[0] != roi_matrix.shape[0]:
raise IOError('length of indices for roi_matrix and covariates must match')
else:
data = pandas.concat([roi_matrix, covs], axis=1)
output = pandas.DataFrame(np.zeros_like(roi_matrix.values),
index = roi_matrix.index,
columns = roi_matrix.columns)
if len(norm_index) == 0:
for roi in roi_matrix.columns:
eq = '%s ~'%roi
for i,col in enumerate(covs.columns):
if i != len(covs.columns) - 1:
eq += ' %s +'%col
else:
eq += ' %s'%col
mod = smf.ols(eq, data = data).fit()
output.loc[:,roi] = mod.resid
if verbose:
print('finished',roi)
else:
for roi in roi_matrix.columns:
eq = '%s ~'%roi
for i,col in enumerate(covs.columns):
if i != len(covs.columns) - 1:
eq += ' %s +'%col
else:
eq += ' %s'%col
mod = smf.ols(eq, data=data.loc[norm_index]).fit()
predicted = mod.predict(data)
w_score = (data.loc[:,roi] - predicted) / mod.resid.std()
output.loc[:,roi] = w_score
if verbose:
print('finished',roi)
return output
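# A rough, numpy-only sketch of the W-score computed above (not the statsmodels
# path used in W_Transform): fit a normative model on the norm sample only, then
# scale a new subject's residual by the SD of the norm-sample residuals.
# All numbers are made up.
def _demo_w_score():
    import numpy as np
    age = np.array([70.0, 75.0, 80.0])            # norm-sample covariate
    roi = np.array([1.0, 1.3, 1.4])               # norm-sample ROI values
    slope, intercept = np.polyfit(age, roi, 1)    # simple one-covariate normative fit
    resid_sd = np.std(roi - (slope * age + intercept))
    new_age, new_roi = 72.0, 1.5
    predicted = slope * new_age + intercept
    return (new_roi - predicted) / resid_sd       # W-score: (actual - predicted) / SD of residuals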
def clean_time(df):
df = pandas.DataFrame(df, copy=True)
symbols = ['.','-',' ', ':', '/','&']
ncols = []
for col in df.columns:
for symbol in symbols:
if symbol in col:
col = col.replace(symbol,'_')
ncols.append(col)
df.columns = ncols
return df
def Weight_Connectome(base_cx, weight_cx, method = 'min', symmetric = True,
transform = MinMaxScaler(), transform_when = 'post',
illustrative = False, return_weight_mtx = False):
if method not in ['min','mean','max']:
raise IOError('a value of "min" or "mean" must be passed for method argument')
    choices = ['pre','post','both','never']
if transform_when not in choices:
raise IOError('transform_when must be set to one of the following: %s'%choices)
if len(np.array(weight_cx.shape)) == 1 or np.array(weight_cx).shape[-1] == 1:
print('1D array passed. Transforming to 2D matrix using %s method'%method)
weight_cx = create_connectome_from_1d(weight_cx, method, symmetric)
if transform_when == 'pre' or transform_when == 'both':
weight_cx = transform.fit_transform(weight_cx)
if base_cx.shape == weight_cx.shape:
if illustrative:
plt.close()
sns.heatmap(base_cx)
plt.title('base_cx')
plt.show()
plt.close()
sns.heatmap(weight_cx)
plt.title('weight_cx')
plt.show()
weighted_cx = base_cx * weight_cx
if illustrative:
plt.close()
sns.heatmap(weighted_cx)
plt.title('final (weighted) cx')
plt.show()
else:
        raise ValueError('base_cx (%s) and weight_cx (%s) do not have the same shape'%(
base_cx.shape,
weight_cx.shape))
if transform_when == 'post' or transform_when == 'both':
        weighted_cx = transform.fit_transform(weighted_cx)
if return_weight_mtx:
return weighted_cx, weight_cx
else:
return weighted_cx
def create_connectome_from_1d(cx, method, symmetric):
nans = [x for x in range(len(cx)) if not pandas.notnull(cx[x])]
    if len(nans) > 0:
raise ValueError('Values at indices %s are NaNs. Cannot compute'%nans)
weight_cx = np.zeros((len(cx),len(cx)))
if method == 'min':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = min([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = min([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
elif method == 'mean':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = np.mean([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = np.mean([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
elif method == 'max':
if symmetric:
for i,j in list(itertools.product(range(len(cx)),repeat=2)):
weight_cx[i,j] = max([cx[i],cx[j]])
else:
for i,j in itertools.combinations(range(len(cx)),2):
weight_cx[i,j] = max([cx[i],cx[j]])
rotator = np.rot90(weight_cx, 2)
weight_cx = weight_cx + rotator
return weight_cx
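# A tiny worked example of create_connectome_from_1d with method='min' and
# symmetric=True: entry (i, j) of the weight matrix becomes min(cx[i], cx[j]).
def _demo_connectome_from_1d():
    import numpy as np
    cx = np.array([3.0, 1.0, 2.0])
    w = create_connectome_from_1d(cx, method='min', symmetric=True)
    expected = np.array([[3., 1., 2.],
                         [1., 1., 1.],
                         [2., 1., 2.]])
    assert np.allclose(w, expected)
    return w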
def plot_best_epicenter_x_subs(output_files, subs_to_select=None, color="blue",title=None, plot=True, dataset="DIAN"):
    clinical_df = pandas.read_csv("../../data/DIAN/participant_metadata/CLINICAL_D1801.csv")
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 25 17:54:30 2020
@author: Administrator
"""
import pandas as pd
import numpy as np
import torch
import torch.nn.functional as F
fns = [#'../checkpoints/eval_resnet50_singleview-Loss-ce-tta-0-test.csv',
#
#
# '../checkpoints/19/eval_effnetb4_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/19/eval_effnetb4_metasingleview-Loss-ce-tta-1-test.csv',
#
# '../checkpoints/19/eval_effnetb4_singleview-Loss-ce-tta-1-test.csv',
# #'../checkpoints/eval_effnetb4_metasingleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/19/eval_effnetb4_metasingleview-Loss-ce-tta-1-test.csv',
'../checkpoint/effb4_meta_default_19/eval_effb4_SVMeta-Loss-ce-tta-1-test.csv',
#'../checkpoint/resnet50_meta_default_19/eval_resnet50_SVMeta-Loss-ce-tta-1-test.csv',
# '../checkpoint/effb4_default_19/eval_effb4_SingleView-Loss-ce-tta-1-test.csv',
]
#fns = ['../checkpoints/eval_resnet50_singleview-Loss-ce-tta-0-test.csv',
# '../checkpoints/eval_resnet50_singleview-Loss-ce-tta-1-test.csv'
#
#
# ]
##mean mode
mean_mode = 'mean'
k_cls = np.array([1.18620359, 2.02358709, 1.01884232, 0.51940186, 0.91387166,
0.28223699, 0.2827877 , 0.44205242])
k_cls =np.minimum(1.0,k_cls)
for idx,fn in enumerate(fns):#
#fn = fns[0]
print('*'*32)
print(fn)
    kl1 = pd.read_csv(fn)
""" estimators.wrapper
Copyright (C) 2020 <NAME> <<EMAIL>>
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pickle
import pandas as pd
import numpy as np
# for evaluating
from sklearn.metrics import r2_score, mean_squared_error
# for GuWrapper
import multiprocessing
import gc
import itertools
import collections.abc
# for random sampling
import random
from .utils import *
"""
Start of wrappers
"""
class PooledWrapper(object):
""" Wrapper implementing Gu et al. (2020). After each month, training set increases by one.
Args:
month (int, optional): Optionally only train the model annually.
n (int): Number of models to average over.
train_size (int): How many months to use for training. Default: 213 which is number
of months between Apr 1958 to Dec 1974.
test_size (int): How many months to use for testing. Default: 144 which is number of
months between Jan 1975 to Dec 1986.
hyperparams (dict): Hyperparameters to test.
n_cpu (int): Number of threads.
n_samples (int, optional): Randomly sample from permutations.
If none, search through all permutations.
"""
def __init__(self, month=None, train_size=213, test_size=144,
hyperparams=None, n_cpu=4, n_samples=None):
super(PooledWrapper, self).__init__()
self.month = month
self.train_size = train_size
self.test_size = test_size
self.hyperparams = hyperparams
self.n_cpu = n_cpu
self.n_samples = n_samples
self.estimator = None
def hyperparam_validate(self, train_x, train_y, test_x, test_y, estimator_fn, hyperparams):
""" Search for best hyperparams.
Args:
train_x (pandas.DataFrame): Training set independent variables.
train_y (pandas.DataFrame): Training set dependent variables.
test_x (pandas.DataFrame): Testing set independent variables.
test_y (pandas.DataFrame): Testing set dependent variables.
estimator_fn (function): Estimator function.
hyperparams (dict): Dictionary with hyperparameters.
Returns:
dict: Best hyperparameters.
"""
val_loss = np.inf
results = []
keys = []
permutations = []
for k in hyperparams:
keys.append(k)
v = hyperparams[k]
            if not isinstance(v, collections.abc.Iterable):
permutations.append([v])
else:
permutations.append(v)
permutations = list(itertools.product(*permutations))
n = len(permutations)
for i in range(n):
params = dict(zip(keys, permutations[i]))
print('Testing iteration {} params {}'.format(i, params))
estimator = estimator_fn(params)
mse, estimator = estimator.fit_set(train_x, train_y, test_x, test_y)
results.append((mse, params, estimator))
# go through the list and find the best
best_mse = np.inf
best_params = None
best_estimator = None
for mse, params, estimator in results:
if mse < best_mse:
best_mse = mse
best_params = params
best_estimator = estimator
        print('Best params {} loss {}'.format(best_params, best_mse))
return best_params, best_estimator
def hyperparam_validate_multithreaded(self, train_x, train_y, test_x, test_y, estimator_fn,
hyperparams):
""" Search for best hyperparams.
Args:
train_x (pandas.DataFrame): Training set independent variables.
train_y (pandas.DataFrame): Training set dependent variables.
test_x (pandas.DataFrame): Testing set independent variables.
test_y (pandas.DataFrame): Testing set dependent variables.
estimator_fn (function): Estimator function.
hyperparams (dict): Dictionary with hyperparameters.
Returns:
dict: Best hyperparameters.
"""
val_loss = np.inf
results = []
keys = []
permutations = []
for k in hyperparams:
keys.append(k)
v = hyperparams[k]
            if not isinstance(v, collections.abc.Iterable):
permutations.append([v])
else:
permutations.append(v)
permutations = list(itertools.product(*permutations))
if self.n_samples:
permutations = random.sample(permutations, k=self.n_samples)
print("Selected {}".format(permutations))
n = len(permutations)
with multiprocessing.Pool(processes=min(n, self.n_cpu)) as pool:
for i in range(n):
params = dict(zip(keys, permutations[i]))
print('Testing iteration {} params {}'.format(i, params))
estimator = estimator_fn(params)
mse = pool.apply_async(estimator.fit_set, (train_x, train_y, test_x, test_y))
results.append((mse, params))
# go through the list and find the best
best_mse = np.inf
best_params = None
best_estimator = None
for mse, params in results:
mse, estimator = mse.get()
if mse < best_mse:
best_mse = mse
best_params = params
best_estimator = estimator
print('Best params {} loss {}'.format(best_params, best_mse))
return best_params, best_estimator
def fit_predict(self, df, estimator_fn, x_column, y_column, fill=None, forward=1,
quote_date=None):
""" Wrapper around a dataframe with first index a quote_date and evaluate the estimator
for correlation, mse and r2.
Args:
df (pandas.DataFrame): DataFrame with index on quote_date.
estimator_fn (estimator): Function returning an estimator.
x_column (str or list(str)): Columns to use as X.
y_column (str or list(str)): Column to use as y.
fill (float): Fill nan.
forward (int): Offset the training <-> prediction set by `forward`.
quote_date (datetime): Date of this iteration. Used to determine whether to do
hyperparameter search.
Returns:
pandas.Series: Result of estimator.predict(X).
"""
if isinstance(x_column, str):
x_column = [x_column]
if isinstance(y_column, str):
y_column = [y_column]
if fill is not None:
            df = df.fillna(fill)
ts = df.index.get_level_values(level=0).unique().sort_values()
# are there enough data?
if len(ts) < 2:
return None
# split it into train and validation set
if len(ts) < self.test_size + self.train_size:
print('Data length {} is less than train size {} and test size {}'.format(len(ts),
self.train_size, self.test_size))
return None
# indices for start/end of training/validation sets and prediction set
train_start = ts[0]
train_end = ts[self.train_size-1]
test_start = ts[self.train_size]
test_end = ts[min(self.train_size + self.test_size - 1, len(ts)-forward)]
predict_idx = ts[-1]
print('Train {} to {}; test {} to {}; predict {}'.format(train_start, train_end, test_start, test_end, predict_idx))
train_set = df.loc[train_start:train_end]
test_set = df.loc[test_start:test_end]
predict_set = df.loc[[predict_idx]]
print('len(train) {}; len(test) {}; len(predict) {}'.format(train_set.shape[0], test_set.shape[0], predict_set.shape[0]))
self.train_size += 1
train_x = train_set[x_column]
train_y = train_set[y_column]
test_x = test_set[x_column]
test_y = test_set[y_column]
if fill is not None:
train_x = train_x.fillna(fill)
train_y = train_y.fillna(fill)
test_x = test_x.fillna(fill)
test_y = test_y.fillna(fill)
# try and save some memory
df = None
train_set = None
test_set = None
gc.collect()
try:
if (self.estimator is None
or self.month is None
# if we are training on real data, quote_date is a datetime
or (self.month is not None
and (isinstance(quote_date, np.datetime64) or isinstance(quote_date, pd.Timestamp))
and self.month == quote_date.month)
# if we are training on simulation, quote_date is an int
or (self.month is not None
and isinstance(quote_date, np.int64)
and quote_date % 10 == self.month)):
if self.n_cpu > 1:
params, self.estimator = self.hyperparam_validate_multithreaded(train_x, train_y,
test_x, test_y, estimator_fn, self.hyperparams)
else:
params, self.estimator = self.hyperparam_validate(train_x, train_y,
test_x, test_y, estimator_fn, self.hyperparams)
else:
print('Skipping training')
X = predict_set[x_column]
predicted = self.estimator.predict(X)
predicted = pd.Series(predicted, index=predict_set.index)
return predicted
except:
print('Dumping to train_set.pkl and test_set.pkl')
pickle.dump(train_set, open('train_set.pkl', 'wb'))
pickle.dump(test_set, open('test_set.pkl', 'wb'))
raise
def train_validate(ts, xs, ys, estimator, batch_size=2, thread=None):
""" Loops through the data for one estimator, training and validating.
Args:
ts (list(datetime)): List of quote dates corresponding to index of xs and ys.
xs (list(numpy.array)): List of numpy arrays in order.
ys (list(numpy.array)): List of numpy arrays in order.
estimator (function): Estimator.
batch_size (int): Size of each training batch.
Returns:
tuple(pandas.DataFrame, estimator): Estimator used and list of validation loss.
"""
val_loss = []
try:
for t in range(batch_size+1, len(ts)):
print('HP search iteration {}'.format(t))
x = xs[t-batch_size-1:t-1]
y = ys[t-batch_size-1:t-1]
estimator = estimator.fit_validate(x, y)
x = xs[t]
y = ys[t]
y_ = estimator.predict(x)
if y_ is not None:
val = mean_squared_error(y, y_)
val_loss.append([ts[t], val])
val_loss = pd.DataFrame(val_loss, columns=['quote_date', 'val_loss'])
return val_loss, estimator
except:
print('Saving to xs.pkl, ys.pkl')
pickle.dump(xs, open('xs.pkl', 'wb'))
pickle.dump(ys, open('ys.pkl', 'wb'))
raise
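# A small, self-contained illustration of the hyperparameter grid expansion the
# hyperparam_validate* methods above rely on: iterable dict values are treated
# as candidate lists, scalars as fixed values, and itertools.product enumerates
# the grid. The parameter names below are toy examples, not ones any estimator
# here requires.
def _demo_hyperparam_grid():
    import itertools
    import collections.abc
    hyperparams = {'learning_rate': [0.01, 0.001], 'depth': [2, 3], 'l2': 1e-4}
    keys, grids = [], []
    for k, v in hyperparams.items():
        keys.append(k)
        grids.append(v if isinstance(v, collections.abc.Iterable) else [v])
    grid = [dict(zip(keys, combo)) for combo in itertools.product(*grids)]
    assert len(grid) == 4   # 2 learning rates x 2 depths x 1 fixed l2
    return grid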
class OnlineDataFrameWrapper(object):
""" Generic wrapper for iterating through a dataframe. Based on the first level of index.
Args:
hyperparams (dict): Hyperparameters to try.
n_cpu (int): Number of multithreads.
"""
def __init__(self, hyperparams=None, n_cpu=4):
super(OnlineDataFrameWrapper, self).__init__()
self.hyperparams = hyperparams
self.n_cpu = n_cpu
def hyperparam_validate(self, df, x_column, y_column, estimator_fn, validation_start,
train_size=1, n_samples=None, hyperparams=None):
""" Search for best hyperparams.
Args:
df (pandas.DataFrame): Data.
x_column (str or list(str)): Independent variables.
y_column (str): Dependent variable.
estimator_fn (function): Estimator function.
validation_start (datetime): Date of validation start to the end of data set.
train_size (int): Size of each batch.
n_samples (int, optional): Randomly sample n choices from permutation.
hyperparams (dict): Dictionary with hyperparameters.
Returns:
dict: Best hyperparameters.
"""
if isinstance(x_column, str):
x_column = [x_column]
if isinstance(y_column, str):
y_column = [y_column]
ts = df.index.get_level_values(level=0).unique().sort_values()
batch_size = train_size * 2
# are there enough data?
if len(ts) < batch_size:
return None
xs = []
ys = []
for _, data in df.groupby(level=0):
xs.append(data[x_column].values)
ys.append(data[y_column].values)
results = []
keys = []
permutations = []
for k in hyperparams:
keys.append(k)
v = hyperparams[k]
            if not isinstance(v, collections.abc.Iterable):
permutations.append([v])
else:
permutations.append(v)
permutations = list(itertools.product(*permutations))
if n_samples:
permutations = random.sample(permutations, k=n_samples)
print("Selected {}".format(permutations))
n = len(permutations)
val_losses = []
# go through the list and find the best
best_loss = np.inf
best_params = None
best_estimator = None
for i in range(n):
params = dict(zip(keys, permutations[i]))
print('Testing iteration {} params {}'.format(i, params))
estimator = estimator_fn(params)
estimator.set_hp_mode(True)
val_loss, estimator = train_validate(ts, xs, ys, estimator, batch_size, i)
estimator.set_hp_mode(False)
loss = val_loss.loc[val_loss['quote_date'] >= validation_start, 'val_loss'].mean()
val_losses.append((loss, params))
if loss < best_loss:
best_loss = loss
best_params = params
best_estimator = estimator
print(val_losses)
print('Best params {} loss {}'.format(best_params, best_loss))
return best_params, best_estimator
def hyperparam_validate_multithreaded(self, df, x_column, y_column, estimator_fn,
validation_start, train_size=1, n_samples=None, hyperparams=None):
""" Search for best hyperparams.
Args:
df (pandas.DataFrame): Data.
x_column (str or list(str)): Independent variables.
y_column (str): Dependent variable.
estimator_fn (function): Estimator function.
validation_start (datetime): Date of validation start to the end of data set.
train_size (int): Size of each batch.
n_samples (int, optional): Randomly sample n choices from permutation.
hyperparams (dict): Dictionary with hyperparameters.
Returns:
dict: Best hyperparameters.
"""
if isinstance(x_column, str):
x_column = [x_column]
if isinstance(y_column, str):
y_column = [y_column]
ts = df.index.get_level_values(level=0).unique().sort_values()
batch_size = train_size * 2
# are there enough data?
if len(ts) < batch_size:
return None
xs = []
ys = []
for _, data in df.groupby(level=0):
xs.append(data[x_column].values)
ys.append(data[y_column].values)
results = []
keys = []
permutations = []
for k in hyperparams:
keys.append(k)
v = hyperparams[k]
            if not isinstance(v, collections.abc.Iterable):
permutations.append([v])
else:
permutations.append(v)
permutations = list(itertools.product(*permutations))
if n_samples:
permutations = random.sample(permutations, k=n_samples)
print("Selected {}".format(permutations))
n = len(permutations)
val_losses = []
with multiprocessing.Pool(processes=min(n, self.n_cpu)) as pool:
for i in range(n):
params = dict(zip(keys, permutations[i]))
print('Testing iteration {} params {}'.format(i, params))
estimator = estimator_fn(params)
estimator.set_hp_mode(True)
result = pool.apply_async(train_validate, (ts, xs, ys, estimator,
batch_size, i))
results.append((result, params))
# go through the list and find the best
best_loss = np.inf
best_params = None
best_estimator = None
for result, params in results:
val_loss, estimator = result.get()
estimator.set_hp_mode(False)
loss = val_loss.loc[val_loss['quote_date'] >= validation_start, 'val_loss'].mean()
val_losses.append((loss, params))
if loss < best_loss:
best_loss = loss
best_params = params
best_estimator = estimator
print(val_losses)
print('Best params {} loss {}'.format(best_params, best_loss))
return best_params, best_estimator
def fit_predict(self, df, estimator, x_column, y_column, fill=None, forward=1,
quote_date=None, **kwargs):
""" Generic wrapper around a dataframe with first index a quote_date.
Args:
df (pandas.DataFrame): DataFrame with index on quote_date.
estimator (function): BaseEstimator compatible estimator.
x_column (str or list(str)): Columns to use as X.
y_column (str or list(str)): Column to use as y.
fill (float): Fill nan.
forward (int): Offset the training <-> prediction set by `forward`.
quote_date (datetime): For record keeping.
**kwargs: Keyword arguments for `estimator`.
Returns:
pandas.Series: Result of estimator.predict(X).
"""
if isinstance(x_column, str):
x_column = [x_column]
if isinstance(y_column, str):
y_column = [y_column]
ts = df.index.get_level_values(level=0).unique().sort_values()
# are there enough data?
if len(ts) < 2:
return None
if fill is not None:
df = df.fillna(fill)
train_set = df.loc[ts[0]:ts[-1-forward]]
predict_set = df.loc[[ts[-1]]]
X = train_set[x_column]
y = train_set[y_column]
try:
estimator = estimator.fit(X, y)
X = predict_set[x_column]
predicted = estimator.predict(X)
            predicted = pd.DataFrame(predicted, index=predict_set.index, columns=y_column)
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 16:31:58 2021
@author: snoone
"""
import os
import glob
import pandas as pd
import numpy as np
pd.options.mode.chained_assignment = None # default='warn'
OUTDIR2= "D:/Python_CDM_conversion/daily/cdm_out/head"
OUTDIR = "D:/Python_CDM_conversion/daily/cdm_out/obs"
os.chdir("D:/Python_CDM_conversion/daily/.csv/")
extension = 'csv'
#my_file = open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r")
#all_filenames = my_file.readlines()
#print(all_filenames)
##use a list of file names to run 5000 in parallel
#with open("D:/Python_CDM_conversion/hourly/qff/ls1.txt", "r") as f:
#all_filenames = f.read().splitlines()
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
##to start at beginning of files
for filename in all_filenames:
##to start at next file after last processed
#for filename in all_filenames[all_filenames.index('SWM00002338.qff'):] :
df=pd.read_csv(filename, sep=",")
##add column headers
df.columns=["Station_ID", "Date", "observed_variable", "observation_value","quality_flag","Measurement_flag","Source_flag","hour"]
df = df.astype(str)
# importing pandas as pd
# filtering the rows where Credit-Rating is Fair
df = df[df["observed_variable"].isin(["SNWD", "PRCP", "TMIN", "TMAX", "TAVG", "SNOW", "AWND", "AWDR", "WESD"])]
df["Source_flag"]=df["Source_flag"]. astype(str)
df['Source_flag'] = df['Source_flag'].str.replace("0","c")
df['Source_flag'] = df['Source_flag'].str.replace("6","n")
df['Source_flag'] = df['Source_flag'].str.replace("7","t")
df['Source_flag'] = df['Source_flag'].str.replace("A","224")
df['Source_flag'] = df['Source_flag'].str.replace("c","161")
df['Source_flag'] = df['Source_flag'].str.replace("n","162")
df['Source_flag'] = df['Source_flag'].str.replace("t","120")
df['Source_flag'] = df['Source_flag'].str.replace("A","224")
df['Source_flag'] = df['Source_flag'].str.replace("a","225")
df['Source_flag'] = df['Source_flag'].str.replace("B","159")
df['Source_flag'] = df['Source_flag'].str.replace("b","226")
df['Source_flag'] = df['Source_flag'].str.replace("C","227")
df['Source_flag'] = df['Source_flag'].str.replace("D","228")
df['Source_flag'] = df['Source_flag'].str.replace("E","229")
df['Source_flag'] = df['Source_flag'].str.replace("F","230")
df['Source_flag'] = df['Source_flag'].str.replace("G","231")
df['Source_flag'] = df['Source_flag'].str.replace("H","160")
df['Source_flag'] = df['Source_flag'].str.replace("I","232")
df['Source_flag'] = df['Source_flag'].str.replace("K","233")
df['Source_flag'] = df['Source_flag'].str.replace("M","234")
df['Source_flag'] = df['Source_flag'].str.replace("N","235")
df['Source_flag'] = df['Source_flag'].str.replace("Q","236")
df['Source_flag'] = df['Source_flag'].str.replace("R","237")
df['Source_flag'] = df['Source_flag'].str.replace("r","238")
df['Source_flag'] = df['Source_flag'].str.replace("S","166")
df['Source_flag'] = df['Source_flag'].str.replace("s","239")
df['Source_flag'] = df['Source_flag'].str.replace("T","240")
df['Source_flag'] = df['Source_flag'].str.replace("U","241")
df['Source_flag'] = df['Source_flag'].str.replace("u","242")
df['Source_flag'] = df['Source_flag'].str.replace("W","163")
df['Source_flag'] = df['Source_flag'].str.replace("X","164")
df['Source_flag'] = df['Source_flag'].str.replace("Z","165")
df['Source_flag'] = df['Source_flag'].str.replace("z","243")
df['Source_flag'] = df['Source_flag'].str.replace("m","196")
station_id=df.iloc[1]["Station_ID"]
##set the value significance for each variable
df["value_significance"]=""
df['observed_variable'] = df['observed_variable'].str.replace("SNWD","53")
df.loc[df['observed_variable'] == "53", 'value_significance'] = '13'
df['observed_variable'] = df['observed_variable'].str.replace("PRCP","44")
df.loc[df['observed_variable'] == "44", 'value_significance'] = "13"
df.loc[df['observed_variable'] == "TMIN", 'value_significance'] = '1'
df['observed_variable'] = df['observed_variable'].str.replace("TMIN","85")
df.loc[df['observed_variable'] == "TMAX", 'value_significance'] = '0'
df['observed_variable'] = df['observed_variable'].str.replace("TMAX","85")
df.loc[df['observed_variable'] == "TAVG", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("TAVG","85")
df['observed_variable'] = df['observed_variable'].str.replace("SNOW","45")
df.loc[df['observed_variable'] == "45", 'value_significance'] = '13'
df['observed_variable'] = df['observed_variable'].str.replace("AWND","107")
df.loc[df['observed_variable'] == "107", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("AWDR","106")
df.loc[df['observed_variable'] == "106", 'value_significance'] = '2'
df['observed_variable'] = df['observed_variable'].str.replace("WESD","55")
df.loc[df['observed_variable'] == "55", 'value_significance'] = '13'
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
df["original_value"]=df["observation_value"]
df['original_value'] = np.where(df['observed_variable'] == "44",
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == "53",
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == "85",
df['original_value'] / 10,
df['original_value']).round(2)
df["original_value"] = np.where(df['observed_variable'] == '45',
df['original_value'] / 10,
df['original_value']).round(2)
df['original_value'] = np.where(df['observed_variable'] == '55',
df['original_value'] / 10,
df['original_value']).round(2)
##SET OBSERVED VALUES TO CDM COMPLIANT values
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
#df["observed_variable"] = pd.to_numeric(df["observed_variable"],errors='coerce')
#df['observation_value'] = df['observation_value'].astype(int).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "44",
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "53",
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == "85",
df['observation_value'] / 10 + 273.15,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '45',
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '55',
df['observation_value'] / 10,
df['observation_value']).round(2)
df['observation_value'] = np.where(df['observed_variable'] == '107',
df['observation_value'] / 10,
df['observation_value']).round(2)
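##the conversion above implies GHCN-Daily style units: temperatures (variable code 85)
##appear to be stored in tenths of degrees C, so value/10 + 273.15 gives Kelvin.
##a quick arithmetic check with a made-up raw value:
def _demo_temperature_conversion():
    raw_tenths_c = 253                        # hypothetical raw TMAX value
    kelvin = round(raw_tenths_c / 10 + 273.15, 2)
    assert kelvin == 298.45
    return kelvin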
##set the original units for each variable
df["original_units"]=""
df.loc[df['observed_variable'] == "85", 'original_units'] = '350'
df.loc[df['observed_variable'] == "44", 'original_units'] = '710'
df.loc[df['observed_variable'] == "45", 'original_units'] = '710'
df.loc[df['observed_variable'] == "55", 'original_units'] = '710'
df.loc[df['observed_variable'] == "106", 'original_units'] = '731'
df.loc[df['observed_variable'] == "107", 'original_units'] = "320"
df.loc[df['observed_variable'] == "53", 'original_units'] = '715'
##set the CDM units for each variable
df["units"]=""
df.loc[df['observed_variable'] == "85", 'units'] = '5'
df.loc[df['observed_variable'] == "44", 'units'] = '710'
df.loc[df['observed_variable'] == "45", 'units'] = '710'
df.loc[df['observed_variable'] == "55", 'units'] = '710'
df.loc[df['observed_variable'] == "106", 'units'] = '731'
df.loc[df['observed_variable'] == "107", 'units'] = "320"
df.loc[df['observed_variable'] == "53", 'units'] = '715'
##set each height above station surface for each variable
df["observation_height_above_station_surface"]=""
df.loc[df['observed_variable'] == "85", 'observation_height_above_station_surface'] = '2'
df.loc[df['observed_variable'] == "44", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "45", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "55", 'observation_height_above_station_surface'] = '1'
df.loc[df['observed_variable'] == "106", 'observation_height_above_station_surface'] = '10'
df.loc[df['observed_variable'] == "107", 'observation_height_above_station_surface'] = "10"
df.loc[df['observed_variable'] == "53", 'observation_height_above_station_surface'] = "1"
##set conversion flags for variables
df["conversion_flag"]=""
df.loc[df['observed_variable'] == "85", 'conversion_flag'] = '0'
df.loc[df['observed_variable'] == "44", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "45", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "55", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "106", 'conversion_flag'] = '2'
df.loc[df['observed_variable'] == "107", 'conversion_flag'] = "2"
df.loc[df['observed_variable'] == "53", 'conversion_flag'] = "2"
##set conversion method for variables
df["conversion_method"]=""
df.loc[df['observed_variable'] == "85", 'conversion_method'] = '1'
##set numerical precision for variables
df["numerical_precision"]=""
df.loc[df['observed_variable'] == "85", 'numerical_precision'] = '0.01'
df.loc[df['observed_variable'] == "44", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "45", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "55", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "106", 'numerical_precision'] = '0.1'
df.loc[df['observed_variable'] == "107", 'numerical_precision'] = "0.1"
df.loc[df['observed_variable'] == "53", 'numerical_precision'] = "1"
df["original_precision"]=""
df.loc[df['observed_variable'] == "85", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "44", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "45", 'original_precision'] = '0.1'
df.loc[df['observed_variable'] == "55", "original_precision"] = '0.1'
df.loc[df['observed_variable'] == "106", 'original_precision'] = '1'
df.loc[df['observed_variable'] == "107", 'original_precision'] = "0.1"
df.loc[df['observed_variable'] == "53", 'original_precision'] = "1"
#add all columns for cdmlite
df['year'] = df['Date'].str[:4]
df['month'] = df['Date'].map(lambda x: x[4:6])
df['day'] = df['Date'].map(lambda x: x[6:8])
df["hour"] ="00"
df["Minute"]="00"
df["report_type"]="3"
df["source_id"]=df["Source_flag"]
df["date_time_meaning"]="1"
df["observation_duration"]="13"
df["platform_type"]=""
df["station_type"]="1"
df["observation_id"]=""
df["data_policy_licence"]=""
df["primary_station_id"]=df["Station_ID"]
df["qc_method"]=df["quality_flag"].astype(str)
df["quality_flag"]=df["quality_flag"].astype(str)
df["crs"]=""
df["z_coordinate"]=""
df["z_coordinate_type"]=""
df["secondary_variable"]=""
df["secondary_value"]=""
df["code_table"]=""
df["sensor_id"]=""
df["sensor_automation_status"]=""
df["exposure_of_sensor"]=""
df["processing_code"]=""
df["processing_level"]="0"
df["adjustment_id"]=""
df["traceability"]=""
df["advanced_qc"]=""
df["advanced_uncertainty"]=""
df["advanced_homogenisation"]=""
df["advanced_assimilation_feedback"]=""
df["source_record_id"]=""
df["location_method"]=""
df["location_precision"]=""
df["z_coordinate_method"]=""
df["bbox_min_longitude"]=""
df["bbox_max_longitude"]=""
df["bbox_min_latitude"]=""
df["bbox_max_latitude"]=""
df["spatial_representativeness"]=""
df["original_code_table"]=""
df["report_id"]=""
###set quality flag to pass 0 or fail 1
#df.loc[df['quality_flag'].notnull(), "quality_flag"] = "1"
#df = df.fillna("Null")
df.quality_flag[df.quality_flag == "nan"] = "0"
df.quality_flag = df.quality_flag.str.replace('D', '1') \
.str.replace('G', '1') \
.str.replace('I', '1')\
.str.replace('K', '1')\
.str.replace('L', '1')\
.str.replace('M', '1')\
.str.replace('N', '1')\
.str.replace('O', '1')\
.str.replace('R', '1')\
.str.replace('S', '1')\
.str.replace('T', '1')\
.str.replace('W', '1')\
.str.replace('X', '1')\
.str.replace('Z', '1')\
.str.replace('H', '1')\
.str.replace('P', '1')
#print (df.dtypes)
##add timestamp to df and create report id
df["Timestamp2"] = df["year"].map(str) + "-" + df["month"].map(str)+ "-" + df["day"].map(str)
df["Seconds"]="00"
df["offset"]="+00"
df["date_time"] = df["Timestamp2"].map(str)+ " " + df["hour"].map(str)+":"+df["Minute"].map(str)+":"+df["Seconds"].map(str)
#df['date_time'] = pd.to_datetime(df['date_time'], format='%Y/%m/%d' " ""%H:%M")
#df['date_time'] = df['date_time'].astype('str')
df.date_time = df.date_time + '+00'
df["dates"]=df["date_time"].str[:-11]
df['primary_station_id_2']=df['primary_station_id'].astype(str)+'-'+df['source_id'].astype(str)
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
df = df.astype(str)
df['source_id'] = df['source_id'].astype(str).apply(lambda x: x.replace('.0',''))
df['primary_station_id_2']=df['primary_station_id'].astype(str)+'-'+df['source_id'].astype(str)
##add in location information to cdm lite station
df2=pd.read_csv("D:/Python_CDM_conversion/daily/config_files/record_id_dy.csv")
df['primary_station_id_2'] = df['primary_station_id_2'].astype(str)
df2 = df2.astype(str)
df= df2.merge(df, on=['primary_station_id_2'])
df['data_policy_licence'] = df['data_policy_licence_x']
df['data_policy_licence'] = df['data_policy_licence'].astype(str).apply(lambda x: x.replace('.0',''))
df["observation_value"] = pd.to_numeric(df["observation_value"],errors='coerce')
df = df.fillna("null")
df = df.replace({"null":""})
##set up master df to extract each variable
df["latitude"] = pd.to_numeric(df["latitude"],errors='coerce')
df["longitude"] = pd.to_numeric(df["longitude"],errors='coerce')
df["latitude"]= df["latitude"].round(3)
df["longitude"]= df["longitude"].round(3)
##add observation id to dataframe
df['observation_id']=df['primary_station_id'].astype(str)+'-'+df['record_number'].astype(str)+'-'+df['dates'].astype(str)
df['observation_id'] = df['observation_id'].str.replace(r' ', '-')
df["observation_id"]=df["observation_id"]+df['observed_variable']+'-'+df['value_significance']
##create report_id from observation_id
df['report_id']=df['primary_station_id'].astype(str)+'-'+df['record_number'].astype(str)+'-'+df['dates'].astype(str)
station_id2=df.iloc[1]["primary_station_id"]
df['report_id'] = df['report_id'].str.strip()
##reorder df columns
df = df[["observation_id","report_id","data_policy_licence","date_time",
"date_time_meaning","observation_duration","longitude","latitude",
"crs","z_coordinate","z_coordinate_type","observation_height_above_station_surface",
"observed_variable","secondary_variable","observation_value",
"value_significance","secondary_value","units","code_table",
"conversion_flag","location_method","location_precision",
"z_coordinate_method","bbox_min_longitude","bbox_max_longitude",
"bbox_min_latitude","bbox_max_latitude","spatial_representativeness",
"quality_flag","numerical_precision","sensor_id","sensor_automation_status",
"exposure_of_sensor","original_precision","original_units",
"original_code_table","original_value","conversion_method",
"processing_code","processing_level","adjustment_id","traceability",
"advanced_qc","advanced_uncertainty","advanced_homogenisation",
"advanced_assimilation_feedback","source_id"]]
col_list=df [["observation_id","latitude","longitude","report_id","source_id","date_time"]]
hdf=col_list.copy()
##add required columns and set up values etc
hdf[['primary_station_id', 'station_record_number', '1', '2', '3']] = hdf['report_id'].str.split('-', expand=True)
#hdf["observation_id"]=merged_df["observation_id"]
hdf["report_id"]=df["report_id"]
hdf["application_area"]=""
hdf["observing_programme"]=""
hdf["report_type"]="3"
hdf["station_type"]="1"
hdf["platform_type"]=""
hdf["primary_station_id_scheme"]="13"
hdf["location_accuracy"]="0.1"
hdf["location_method"]=""
hdf["location_quality"]="3"
hdf["crs"]="0"
hdf["station_speed"]=""
hdf["station_course"]=""
hdf["station_heading"]=""
hdf["height_of_station_above_local_ground"]=""
hdf["height_of_station_above_sea_level_accuracy"]=""
hdf["sea_level_datum"]=""
hdf["report_meaning_of_timestamp"]="1"
hdf["report_timestamp"]=""
hdf["report_duration"]="13"
hdf["report_time_accuracy"]=""
hdf["report_time_quality"]=""
hdf["report_time_reference"]="0"
hdf["platform_subtype"]=""
hdf["profile_id"]=""
hdf["events_at_station"]=""
hdf["report_quality"]=""
hdf["duplicate_status"]="4"
hdf["duplicates"]=""
hdf["source_record_id"]=""
hdf ["processing_codes"]=""
hdf['record_timestamp'] = pd.to_datetime('now').strftime("%Y-%m-%d %H:%M:%S")
hdf.record_timestamp = hdf.record_timestamp + '+00'
hdf["history"]=""
hdf["processing_level"]="0"
hdf["report_timestamp"]=df["date_time"]
hdf['primary_station_id_2']=hdf['primary_station_id'].astype(str)+'-'+hdf['source_id'].astype(str)
hdf["duplicates_report"]=hdf["report_id"]+'-'+hdf["station_record_number"].astype(str)
station_id=hdf.iloc[1]["primary_station_id"]
df2=
|
pd.read_csv("D:/Python_CDM_conversion/daily/config_files/record_id_dy.csv")
|
pandas.read_csv
|
#code reference: https://github.com/SebastianMantey/Decision-Tree-from-Scratch
import numpy as np
import pandas as pd
import pickle
import random, re
from pprint import pprint #to print out the tree
#Prepare data for analysis
def cleanData(data):
data["label"] = data.win #change the name of the response variable
data = data.drop(["Unnamed: 0", "X", "player", "action", "gameId", "win"], axis=1) #eliminate unnecessary variables
return data
#check the variable type
def check_var_type(df):
var_type = []
    threshold = 3 #maximum number of unique values for a variable to be treated as categorical
for var in df.columns:
if var != "label":
uniqueVal = df[var].unique()
            if isinstance(uniqueVal[0], str) or (len(uniqueVal) <= threshold): #if the values are strings or the number of unique values is at most the threshold
var_type.append("categorical")
else:
var_type.append("numeric")
return var_type
#Evaluate the probability of winning for each node
def evaluate_leaf_node(df):
label_list = df[:, -1]
label_list = np.sort(label_list)
    category, count = np.unique(label_list, return_counts = True) #get the counts of winning and losing in each node
#calculate probability
if len(count) == 2:
prob_of_winning = count[1]/(count[0] + count[1])
else:
if category[0] == 0:
prob_of_winning = 0
else:
prob_of_winning = 1
return prob_of_winning
#Determine where the potential split can be for a variable
#different ways of split for categorical and numeric variables
def potential_split(df):
pot_split = {}
rows, cols = df.shape #get the shape of dataframe
for col_ind in range(cols - 1): #for each variable
values = df[:, col_ind]
uniqueVal = np.unique(values)
if FEATURE_TYPES[col_ind] == "categorical":
if len(uniqueVal) > 1:
pot_split[col_ind] = uniqueVal #record the possible place for a split
else:
pot_split[col_ind] = []
for i in range(len(uniqueVal)):
if i != 0:
cur_val = uniqueVal[i]
prev_val = uniqueVal[i-1]
add_val = (cur_val + prev_val)/2 #record the possible place for split. Split happens at the mean value of the current value and the previous value
pot_split[col_ind].append(add_val) #add the mean value
return pot_split
#Split the data into two to calculate conditional entropy (conditional entropy calculation does not happen in this function)
def splitData(df, column, val):
data_col = df[:, column]
if FEATURE_TYPES[column] == "categorical":
data_one = df[data_col == val] #data with the value that is fed
data_two = df[data_col != val] #data without the values that is fed
else: #for a numeric variable
data_one = df[data_col <= val] #data with values smaller than the value that is fed
data_two = df[data_col > val] #data with values bigger than the value that is fed
return data_one, data_two
#calculate entropy
def entropy(df):
label_list = df[:, -1]
category, count = np.unique(label_list, return_counts = True) #calculate the number of uniques values
#entropy calculation
prob = count/count.sum()
entropy = -sum(prob*np.log2(prob))
return entropy
#calculate conditional entropy
def conditional_entropy(data_one, data_two):
prob_data_one = len(data_one)/(len(data_one) + len(data_two))
prob_data_two = len(data_two)/(len(data_one) + len(data_two))
    cond_entropy = prob_data_one * entropy(data_one) + prob_data_two*entropy(data_two) #calculate conditional entropy by combining the entropy function
return cond_entropy
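#Worked example (added, illustrative): for labels [1, 1, 0, 0] the class probabilities are
#[0.5, 0.5], so entropy = -(0.5*log2(0.5) + 0.5*log2(0.5)) = 1.0 bit. A split into [1, 1] and
#[0, 0] gives two pure halves, so the conditional entropy is 0.5*0 + 0.5*0 = 0.0; best_split
#below keeps the split with the smallest conditional entropy.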
#Decide which variable and which value produces the best split
def best_split(df, potential_split_dict):
    cond_entropy = float("inf")
best_col = None
best_val = None
for col in potential_split_dict:
for value in potential_split_dict[col]:
data_one, data_two = splitData(df, col, value)
cur_cond_entropy = conditional_entropy(data_one, data_two)
if cur_cond_entropy <= cond_entropy: #choose one that has a smaller conditional entropy
cond_entropy = cur_cond_entropy #update the smallest conditional entropy
best_col = col #update the best column
best_val = value #update the best value
data_one, data_two = splitData(df, best_col, best_val)
return data_one, data_two, best_col, best_val
#check whether the observations in one node have the same label
#If this function returns true, no more split will happen
def purity(df):
label_list = df[:, -1]
uniqueVal = np.unique(label_list)
if len(uniqueVal) == 1:
return True
else:
return False
#the main function to create decision tree
def rpart(df, minsplit, maxdepth, curdepth = 0):
if curdepth == 0:
data = df.values #convert the pandas data frame into numpy format
else:
data = df
#If observations in one node has the same label or number of observations in one node is too small to be split or max depth is reached
#then evaluate the probability of winning
if (purity(data)) or (len(data) < minsplit) or (curdepth == maxdepth):
prob = evaluate_leaf_node(data)
return prob
else:
curdepth += 1
pot_splits = potential_split(data)
data_one, data_two, best_col, best_val = best_split(data, pot_splits)
feature = COLUMN_HEADERS[best_col]
if FEATURE_TYPES[best_col] == "categorical":
node = "{} = {}".format(feature, best_val) #the first item is the variable to split, and the second item is which value the split takes place
else:
node = "{} <= {}".format(feature, best_val) #the first item is the variable to split, and the second item is which value the split takes place
sub_tree = {node: []}
yes_answer = rpart(data_one, minsplit, maxdepth, curdepth) #recursion on the subsetted data
no_answer = rpart(data_two, minsplit, maxdepth, curdepth) #recursion on the subsetted data
#update the tree
if yes_answer == no_answer:
sub_tree = yes_answer
else:
sub_tree[node].append(yes_answer)
sub_tree[node].append(no_answer)
return sub_tree
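#Usage sketch (added, illustrative only): the COLUMN_HEADERS / FEATURE_TYPES globals must be set
#before rpart is called (presumably done in test() below, which is truncated here); the parameter
#values are arbitrary examples.
#   COLUMN_HEADERS = traindf.columns
#   FEATURE_TYPES = check_var_type(traindf)
#   tree = rpart(traindf, minsplit=20, maxdepth=5)
#   pprint(tree)  #nested dict such as {"feature <= 3.5": [0.82, {...}]}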
#predict the label for one item using the decision tree that is built
def predict(item, tree):
for key in tree.keys():
if re.search('<=', key):
result = re.findall('(.*?) <= (.*)', key) #use regular expression to extract the variable and value to split
var = result[0][0]
value = float(result[0][1])
mark = "numeric"
else:
result = re.findall('(.*?) = (.*)', key)
var = result[0][0]
try:
                value = float(result[0][1]) #sometimes categorical variables have numeric values
except:
value = result[0][1]
mark = "categorical"
if mark == "numeric":
if item[var] <= value:
tree = tree[key][0] #go to the left side of the tree
else:
                tree = tree[key][1] #go to the right side of the tree
else:
if item[var] == value:
tree = tree[key][0] #go to the left side of the tree
else:
                tree = tree[key][1] #go to the right side of the tree
prediction = None
if type(tree) is dict:
prediction = predict(item, tree) #recursion
else:
prediction = tree #return probability
break
return prediction
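#Example (added, illustrative): with tree = {"score <= 10.0": [0.2, 0.9]} the call
#predict({"score": 15}, tree) takes the "no" branch and returns 0.9, i.e. the estimated
#probability of winning for that observation ("score" is a made-up feature name).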
#function to calculate accuracy, precision, recall
#alpha is the threshold to determine the label based on probability
#alpha = 0.5 means that if the probability of winning is bigger than 0.5, then it is classified as winning
def accuracy(df, prediction, alpha):
label_list = df.loc[:, "label"]
prediction_list = []
if len(label_list) == len(prediction):
for each in prediction:
if each <= alpha:
classification = 0
else:
classification = 1
prediction_list.append(classification)
y_actu = pd.Series(label_list, name='Actual')
y_pred = pd.Series(prediction_list, name='Predicted')
df_confusion = pd.crosstab(y_pred, y_actu) #create a confusion matrix
print(df_confusion)
accuracy = (df_confusion[0][0] + df_confusion[1][1])/ len(label_list)
precision = df_confusion[0][0] / (df_confusion[0][0] + df_confusion[0][1])
recall = df_confusion[0][0] / (df_confusion[0][0] + df_confusion[1][0])
return accuracy, precision, recall
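#Example (added): with alpha = 0.5 a predicted winning probability of 0.7 is classified as a
#win (1) and 0.4 as a loss (0); raising alpha generally makes "win" predictions more conservative.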
def test():
traindf = pd.read_csv("othello2016W.csv")
heldoutdf =
|
pd.read_csv("othello2017W.csv")
|
pandas.read_csv
|
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# We need to find the leaders by mentions.
# We need to rank leaders by weight given to them by mentions (Building Liquid Ranking).
# Initial weight to all mentions is given as equal.
# Calculate scores on the basis of the above, iterating and updating the weights; mentions
# made by feed owners should also be considered while scoring.
import re
import operator
import math
import numpy as np
import pandas as pd
from collections import Counter
df = pd.read_csv('D:/Thesis/crypto_twitter_reddit.csv', index_col=0)
df = df[df.type == 'twitter'] # only twitter data
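# A minimal sketch (added, illustrative only -- not the author's implementation) of the iterative
# scoring idea described in the header comments: every mention passes a share of the mentioning
# user's weight to the mentioned user; the damping factor and iteration count are assumptions.
def _rank_mentions_sketch(mention_graph, n_iter=10, damping=0.85):
    """mention_graph: dict mapping a user to the list of users they mention."""
    users = set(mention_graph) | {u for vs in mention_graph.values() for u in vs}
    score = {u: 1.0 / len(users) for u in users}  # equal initial weight for everyone
    for _ in range(n_iter):
        new_score = {u: (1 - damping) / len(users) for u in users}
        for src, targets in mention_graph.items():
            if targets:
                share = damping * score[src] / len(targets)
                for dst in targets:  # weight flows from the mentioning user to each mentioned user
                    new_score[dst] += share
        score = new_score
    return sorted(score.items(), key=lambda kv: kv[1], reverse=True)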
# Finding mentions in data by feed owners
def feedomen(feedo):
flist = []
df =
|
pd.read_csv('D:/Thesis/crypto_twitter_reddit.csv', index_col=0)
|
pandas.read_csv
|
import os
import pickle
import time
import pandas as pd
from Utils.Get import get_data, get_nan, get_duplicate, get_specs, get_y_data
from Utils.Graphics import plot_time_slider
def protocol(df, dates, time_window_length, non_overlapping_length, pickle_file, time_slider_path):
"""
The protocol of the subject
:param pd.DataFrame df: the used DataFrame
    :param list dates: list of 2 elements: the date bounds
    :param int time_window_length: the length of the window
    :param int non_overlapping_length: the number of non-overlapping elements
:param str pickle_file: name of the pickle file, will be saved in "/Files/Out/Pickles"
:param str time_slider_path: The folder where the plots will be saved
"""
import glob
# Variables
start_time = time.time()
df_specs =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : preprocess.py
@Desc :
@Project : src
@Contact : <EMAIL>
@License : (C)Copyright 2018-2020, 1UVU.COM
@WebSite : 1uvu.com
@Modify Time @Author @Version
------------ ------- --------
2020/09/02 14:41 1uvu 0.0.1
"""
import texthero as hero
import nltk.stem as ns
import pandas as pd
import numpy as np
import re
from utils import stem, similar_replace, remove_chore
from settings import *
def preprocess_pipeline(df: pd.DataFrame) -> pd.DataFrame:
# drop duplicates
# date2year
# fill na and modify invalid value
# topics normalize
ddf = df.copy()
ddf["year"] = year(ddf)
ddf["topics"] = norm(ddf)
ddf = clean(ddf)
ddf = fill(ddf)
ddf = drop(ddf)
return ddf
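# Usage sketch (added, illustrative): the file name is hypothetical; the raw frame is expected to
# carry at least the columns used below ("year", "topics", "abstract", "url", "title", "cite").
#   raw = pd.read_csv("papers.csv")
#   papers = preprocess_pipeline(raw)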
def clean(df: pd.DataFrame) -> pd.DataFrame:
# topics clean
ddf = df.copy(deep=False)
topics = ddf["topics"]
p_topics = []
for t in topics:
if type(t) is not str:
p_topics.append(nan_str)
continue
t_list = re.split(r",", t)
topic = ",".join(set(t_list))
p_topics.append(topic)
ddf["topics"] = pd.Series(p_topics)
return ddf
def drop(df: pd.DataFrame) -> pd.DataFrame:
"""
:param df:
:return:
    :notice: if a series has many NaN or identical values (such as many '0'), don't use this func,
    because it may remove rows that are not actually duplicates
"""
ddf = df.copy(deep=False)
ddf = ddf.dropna(how='all', subset=["topics", "abstract"])
ddf = ddf[~ddf["topics"].isin([nan_str, "nan", "", np.nan])]
ddf = ddf.drop_duplicates(["abstract"]).drop_duplicates(["url"]).drop_duplicates(["title"])
return ddf
def year(df: pd.DataFrame) -> pd.Series:
ys = []
for _y in df["year"]:
year = str(_y).strip()
if "-" in year and not re.search("[a-z|A-Z]", year):
y = re.split("-", year)[0]
else:
y = re.split(" ", year)[-1]
        ys.append(re.split(r"\.", y)[0])
return pd.Series(ys)
def fill(df: pd.DataFrame) -> pd.DataFrame:
"""
:param df:
:return:
    :notice: only the 'cite' series is filled with '0'; the other series use 'nothing' as the fill value
"""
cite = []
for c in df["cite"]:
if pd.isna(c):
cite.append("0")
else:
cite.append(str(re.split(" ", str(c))[0]))
ddf = df.copy(deep=False)
ddf["cite"] =
|
pd.Series(cite)
|
pandas.Series
|
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import OPTICS
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
import matplotlib.cm as cm
from hurst import compute_Hc
from arch.unitroot import ADF
import itertools
import typing
class ClusteringPairSelection:
performance_features: pd.DataFrame = pd.DataFrame()
explained_variance: pd.Series = pd.Series()
_clusters: pd.Series = pd.Series()
pairs_list: typing.List[typing.Tuple]
cointegrated_pairs_list: typing.List[typing.List]
cointegration_result: pd.DataFrame = pd.DataFrame()
filtered_pairs: pd.DataFrame = pd.DataFrame()
spreads: pd.DataFrame = pd.DataFrame()
def __init__(self, price: pd.DataFrame):
price_no_na = price.dropna(axis=1)
n_dropped = price.shape[1] - price_no_na.shape[1]
print(f"Dropped {n_dropped} columns out of {price.shape[1]}")
self.price = price_no_na
self.log_price = np.log(price_no_na)
self.performance = self.log_price.diff().iloc[1:]
self.normal_performance = StandardScaler().fit_transform(self.performance)
def select_pairs(self):
print("Converting prices to features...")
self.returns_to_features(5)
pd.Series(self.explained_variance).plot(kind='bar', title="Cumulative explained variance")
plt.show()
print("Creating clusters....")
self.create_clusters(3)
self.clusters.plot(kind='bar', title='Clusters, % of Allocated samples')
plt.show()
self.plot_clusters()
print("Running cointegration check....")
self.check_cointegration()
print("Estimating selection criteria...")
self._calculate_hurst_exponent()
self._calculate_half_life()
print("Applying filters...")
self._apply_post_cointegration_filters()
def returns_to_features(self, n_components):
pca = PCA(n_components=n_components)
transposed_returns = self.normal_performance.T
pca.fit(transposed_returns)
reduced_returns = pd.DataFrame(transposed_returns.dot(pca.components_.T), index=self.performance.columns)
self.explained_variance = pca.explained_variance_ratio_.cumsum()
self.performance_features = reduced_returns
def create_clusters(self, min_samples):
optics = OPTICS(min_samples=min_samples)
clustering = optics.fit(self.performance_features)
        len(clustering.labels_[clustering.labels_ == -1]) / len(clustering.labels_)  # share of samples labelled -1 (unclustered); note the value is computed but never stored
classified = pd.Series(clustering.labels_, index=self.performance.columns)
self._clusters = classified
self._create_cluster_based_pairs()
@property
def clusters(self):
clusters = pd.Series(self._clusters.index.values, index=self._clusters)
clusters = clusters.groupby(level=0).count()
clusters /= clusters.sum()
return clusters
@staticmethod
def _npr(n, r=2):
return np.math.factorial(n) / np.math.factorial(n - r)
def _create_cluster_based_pairs(self):
classified = self._clusters[self._clusters != -1]
all_pairs = []
for group_id in classified.sort_values().unique():
group = classified[classified == group_id].index.tolist()
combinations = list(itertools.permutations(group, 2))
all_pairs.extend(combinations)
self.pairs_list = all_pairs
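        # (added note) permutations() yields ordered pairs, so a cluster of n symbols contributes
        # n*(n-1) pairs -- the quantity _npr(n) above computes; check_cointegration therefore
        # tests each unordered pair in both directions (x on y and y on x).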
def check_cointegration(self):
results = []
pairs_series = {}
total_pairs_length = len(self.pairs_list)
for i, pair in enumerate(self.pairs_list):
x, y = self.log_price.loc[:, pair].values.T
pair_name = "|".join(pair)
pair_id = "|".join(sorted(pair))
residuals = self._get_residuals(x, y)
adf_test = ADF(residuals, lags=1)
p_value = adf_test.pvalue
test_stat = adf_test.stat
results.append({"id": pair_id, "p_value": p_value, "stat": test_stat, "pair": pair_name})
pairs_series[pair_name] = residuals
current = (i + 1)
print(f"{current}/{total_pairs_length} ({current / total_pairs_length:.2%})", end="\r", flush=True)
pairs_series = pd.DataFrame(pairs_series, index=self.price.index)
results =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.5
# kernelspec:
# display_name: 'Python 3.9.6 64-bit (''boiler-map'': conda)'
# name: python3
# ---
# %%
import numpy as np
import pandas as pd
# %% tags=["parameters"]
product = None
upstream = ["download_synthetic_bers"]
# %%
buildings =
|
pd.read_parquet(upstream["download_synthetic_bers"])
|
pandas.read_parquet
|
# coding: utf-8
import test
import urllib.parse
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import paired_distances,cosine_similarity
import pymysql
import pymongo
# get the relevant recipe information
def recipetempelete(left):
if len(left) >= 6:
myfinallist = []
for content in left[0:6]:
singlelist = []
name = content[0]
imgurl = content[1]
ingredient = content[2]
            singlelist.append(imgurl) # image URL
            singlelist.append(name) # recipe name
            singlelist.append(ingredient) # ingredients
            urlname = urllib.parse.quote(name)
            # YouTube search
            ytname = f'https://www.youtube.com/results?search_query={urlname}'
            singlelist.append(ytname) # video search URL
            # add a recipe link
            if imgurl[0:11] == r'https://img':
                url = f"https://cook1cook.com/search?keyword={urlname}&theme=recipe"
                singlelist.append(url) # recipe search URL
            elif imgurl[0:11] == r'https://tok':
                url = f'https://icook.tw/search/{urlname}'
                singlelist.append(url) # recipe search URL
            # nutrient lookup
            nutrientsentence = ''
            for nutrient, weight in content[5].items():
                nutrientsentence += (f'{nutrient}含有: {weight}\n')
            singlelist.append(nutrientsentence) # nutrients
            myfinallist.append(singlelist) # append this recipe to the final result
return myfinallist
elif left ==[]:
return '{查無符合資料}'
else:
myfinallist = []
for content in left[0:len(left)]:
singlelist = []
name = content[0]
imgurl = content[1]
ingredient = content[2]
            singlelist.append(imgurl) # image URL
            singlelist.append(name) # recipe name
            singlelist.append(ingredient) # ingredients
            # YouTube search
            ytname = f'https://www.youtube.com/results?search_query={name}'
            singlelist.append(ytname) # video search URL
            # add a recipe link
            if imgurl[0:11] == r'https://img':
                url = f"https://cook1cook.com/search?keyword={name}&theme=recipe"
                singlelist.append(url) # recipe search URL
            elif imgurl[0:11] == r'https://tok':
                url = f'https://icook.tw/search/{name}'
                singlelist.append(url) # recipe search URL
            # nutrient lookup
            nutrientsentence = ''
            for nutrient, weight in content[5].items():
                nutrientsentence += (f'營養素名:{nutrient},重量:{weight}\n')
            singlelist.append(nutrientsentence) # nutrients
            myfinallist.append(singlelist) # append this recipe to the final result
return myfinallist
# print(len(left))
# print(recipetempelete(left))
########################################################################
# reply template
# def input_column(recipes):
# if recipes == '{查無符合資料}':
# # text_message = TextSendMessage(text='查無符合資料')
# # line_bot_api.reply_message(event.reply_token, text_message)
# return "error"
#
# else:
# columnlist=[]
# for recipe in recipes:
#
# column = CarouselColumn(
# thumbnail_image_url=f'{recipe[0]}',
# title=f'{recipe[1]}',
# text=f'{recipe[2]}',
# actions=[
# URITemplateAction(
# label=f'{recipe[1]}作法影片',
# uri=f'{recipe[3]}'),
# URITemplateAction(
# label=f'{recipe[1]}食譜查詢',
# uri= f'{recipe[4]}'),
# MessageTemplateAction(
# label=f'{recipe[1]}營養素',
# text=f'{recipe[5]}')
# ]
# )
# columnlist.append(column)
# # Carousel_template = TemplateSendMessage(
# # alt_text='Carousel template',
# # template=CarouselTemplate(
# # columns=columnlist))
# # line_bot_api.reply_message(event.reply_token,Carousel_template)
# print(columnlist)
# def leftoverRecipe(leftover):
# # SQL settings
# db_settings = {
# 'host': '127.0.0.1',
# 'port': 3306,
# 'user': 'test',
# 'password': '<PASSWORD>',
# 'db': 'my_db'
# }
# # connect to MySQL
# conn = pymysql.connect(**db_settings)
# # read view_recipe_energe
# get_energe = """SELECT id, Calorie_correction, Moisture,
# Crude_protein, Crude_fat, Saturated_fat, Total_carbohydrates
# FROM view_recipe_energe;"""
# energe = pd.read_sql(get_energe, conn)
# energe.columns = ['id', '修正熱量', '水分', '粗蛋白',
# '粗脂肪', '飽和脂肪', '總碳水化合物']
# # extract the recipe ids from view_recipe_energe
# cache_id = energe.iloc[:, 0].to_list()
# # read recipe_to_sql
# get_recipe = f"SELECT * FROM recipe_to_sql;"
# recipes = pd.read_sql(get_recipe, conn)
# # connect to MongoDB
# mongo_info = "mongodb+srv://lukelee:<EMAIL>.<EMAIL>/myFirstDatabase?retryWrites=true&w=majority"
# client = pymongo.MongoClient(mongo_info)
# # MongoDB database
# db = client.food
# # MongoDB collection
# monrecipe = db.recipe
# outputs = list()
# for i in cache_id:
# recipe = recipes.loc[recipes["id"] == i]
# mates = recipe["material"].to_list()
# # if the leftovers contain every ingredient of this recipe
# if all(m in leftover for m in mates):
# # get recipe info from MongoDB
# mongoData = monrecipe.find_one({'id': str(i)},
# {'id': 0, '_id': 0, '料理時間': 0, '簡介': 0, '作者': 0})
# output = [v for k,v in mongoData.items()]
# # get recipe_energe info from view_recipe_energe
# recipe_energe = energe.loc[energe["id"] == i].drop(["id"], axis=1).to_dict("records")[0]
# output.append(recipe_energe)
# outputs.append(output)
# return outputs
def recipe_temp(recipes):
if recipes == '{查無符合資料}':
return "error"
else:
temlist = []
for recipe in recipes:
a = {
"type": "bubble",
"size": "micro",
"hero": {
"type": "image",
"url": f"{recipe[0]}",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "320:213"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": f"{recipe[1]}"
},
{
"type": "text",
"text": f"{recipe[2]}",
"weight": "bold",
"size": "sm",
"wrap": True
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"contents": []
},
{
"type": "button",
"action": {
"type": "uri",
"label": '作法影片',
"uri": f'{recipe[3]}'
},
"style": "primary",
"color": "#FF8282"
},
{
"type": "button",
"action": {
"type": "uri",
"label": '食譜查詢',
"uri": f'{recipe[4]}'
},
"style": "primary",
"color": "#FF8C8C"
},
{
"type": "button",
"action": {
"type": "message",
"label": '營養素',
"text": f'{recipe[5]}'
},
"style": "primary",
"color": "#FF9696"
}
]
}
],
"spacing": "sm",
"paddingAll": "13px"
}
}
temlist.append(a)
content = {"type": "carousel", "contents":temlist}
return content
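# (added note) The dict returned by recipe_temp() is a LINE Flex Message "carousel" container; it
# would typically be wrapped in FlexSendMessage(alt_text=..., contents=content) before being sent.
# That wrapping call is assumed here and is not shown in this module.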
# recipelist = test.left()
# recipes = recipetempelete(recipelist)
# a = recipe_temp(recipes)
# print(a)
# b= f'str({"type": "carousel", "contents":[{}]})'
# print(type(b))
def get_the_most_similar_receipes(searchUserId, num):
receipes = pd.read_csv('./recipe_group.csv')
like = pd.read_csv('./userid_like.csv')
df = pd.merge(like, receipes, on='id')
    # build the feature matrix for the recipes
oneHot = pd.get_dummies(receipes["Cluster_category"].astype(str)) # One-Hot Encoding
receipe_arr = pd.concat([receipes, oneHot], axis=1)
receipe_arr.drop("Cluster_category", axis=1, inplace=True)
receipe_arr.set_index("id", inplace=True)
    # build the feature matrix for the user
oneHot = pd.get_dummies(df["Cluster_category"].astype(str)) # One-Hot Encoding
user_arr =
|
pd.concat([df, oneHot], axis=1)
|
pandas.concat
|
from datetime import (
datetime,
timedelta,
timezone,
)
import numpy as np
import pytest
import pytz
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
Timestamp,
date_range,
isna,
)
import pandas._testing as tm
class TestSeriesFillNA:
def test_fillna_nat(self):
series = Series([0, 1, 2, NaT.value], dtype="M8[ns]")
filled = series.fillna(method="pad")
filled2 = series.fillna(value=series.values[2])
expected = series.copy()
expected.values[3] = expected.values[2]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="pad")
filled2 = df.fillna(value=series.values[2])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
series = Series([NaT.value, 0, 1, 2], dtype="M8[ns]")
filled = series.fillna(method="bfill")
filled2 = series.fillna(value=series[1])
expected = series.copy()
expected[0] = expected[1]
tm.assert_series_equal(filled, expected)
tm.assert_series_equal(filled2, expected)
df = DataFrame({"A": series})
filled = df.fillna(method="bfill")
filled2 = df.fillna(value=series[1])
expected = DataFrame({"A": expected})
tm.assert_frame_equal(filled, expected)
tm.assert_frame_equal(filled2, expected)
def test_fillna_value_or_method(self, datetime_series):
msg = "Cannot specify both 'value' and 'method'"
with pytest.raises(ValueError, match=msg):
datetime_series.fillna(value=0, method="ffill")
def test_fillna(self):
ts = Series([0.0, 1.0, 2.0, 3.0, 4.0], index=tm.makeDateIndex(5))
tm.assert_series_equal(ts, ts.fillna(method="ffill"))
ts[2] = np.NaN
exp = Series([0.0, 1.0, 1.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="ffill"), exp)
exp = Series([0.0, 1.0, 3.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(method="backfill"), exp)
exp = Series([0.0, 1.0, 5.0, 3.0, 4.0], index=ts.index)
tm.assert_series_equal(ts.fillna(value=5), exp)
msg = "Must specify a fill 'value' or 'method'"
with pytest.raises(ValueError, match=msg):
ts.fillna()
def test_fillna_nonscalar(self):
# GH#5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.0])
tm.assert_series_equal(result, expected)
result = s1.fillna({})
tm.assert_series_equal(result, s1)
result = s1.fillna(Series((), dtype=object))
tm.assert_series_equal(result, s1)
result = s2.fillna(s1)
tm.assert_series_equal(result, s2)
result = s1.fillna({0: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna({1: 1})
tm.assert_series_equal(result, Series([np.nan]))
result = s1.fillna({0: 1, 1: 1})
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}))
tm.assert_series_equal(result, expected)
result = s1.fillna(Series({0: 1, 1: 1}, index=[4, 5]))
tm.assert_series_equal(result, s1)
def test_fillna_aligns(self):
s1 = Series([0, 1, 2], list("abc"))
s2 = Series([0, np.nan, 2], list("bac"))
result = s2.fillna(s1)
expected = Series([0, 0, 2.0], list("bac"))
tm.assert_series_equal(result, expected)
def test_fillna_limit(self):
ser = Series(np.nan, index=[0, 1, 2])
result = ser.fillna(999, limit=1)
expected = Series([999, np.nan, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
result = ser.fillna(999, limit=2)
expected = Series([999, 999, np.nan], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
def test_fillna_dont_cast_strings(self):
# GH#9043
# make sure a string representation of int/float values can be filled
# correctly without raising errors or being converted
vals = ["0", "1.5", "-0.3"]
for val in vals:
ser = Series([0, 1, np.nan, np.nan, 4], dtype="float64")
result = ser.fillna(val)
expected = Series([0, 1, val, val, 4], dtype="object")
tm.assert_series_equal(result, expected)
def test_fillna_consistency(self):
# GH#16402
# fillna with a tz aware to a tz-naive, should result in object
ser = Series([Timestamp("20130101"), NaT])
result = ser.fillna(Timestamp("20130101", tz="US/Eastern"))
expected = Series(
[Timestamp("20130101"), Timestamp("2013-01-01", tz="US/Eastern")],
dtype="object",
)
tm.assert_series_equal(result, expected)
msg = "The 'errors' keyword in "
with tm.assert_produces_warning(FutureWarning, match=msg):
# where (we ignore the errors=)
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = ser.where(
[True, False], Timestamp("20130101", tz="US/Eastern"), errors="ignore"
)
tm.assert_series_equal(result, expected)
# with a non-datetime
result = ser.fillna("foo")
expected = Series([Timestamp("20130101"), "foo"])
tm.assert_series_equal(result, expected)
# assignment
ser2 = ser.copy()
ser2[1] = "foo"
tm.assert_series_equal(ser2, expected)
def test_fillna_downcast(self):
# GH#15277
# infer int64 from float64
ser = Series([1.0, np.nan])
result = ser.fillna(0, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
ser = Series([1.0, np.nan])
result = ser.fillna({1: 0}, downcast="infer")
expected = Series([1, 0])
tm.assert_series_equal(result, expected)
def test_timedelta_fillna(self, frame_or_series):
# GH#3371
ser = Series(
[
Timestamp("20130101"),
Timestamp("20130101"),
Timestamp("20130102"),
Timestamp("20130103 9:01:01"),
]
)
td = ser.diff()
obj = frame_or_series(td)
# reg fillna
result = obj.fillna(
|
Timedelta(seconds=0)
|
pandas.Timedelta
|
import pandas as pd
from collections import namedtuple
from functools import lru_cache
from typing import Any, Type, List, Iterable, Optional # noqa
from backlight.datasource.marketdata import MarketData
Transaction = namedtuple("Transaction", ["timestamp", "amount"])
def _max(s: pd.Series) -> int:
if len(s) == 0:
return 0
return max(s)
class Trades(pd.DataFrame):
"""A collection of trades.
This is designed to achieve following purposes
1. Compute metrics which need individual trade perfomance
s.t. win_rate and lose_rate.
2. Filter the trades.
"""
_metadata = ["symbol"]
_target_columns = ["amount", "_id"]
@property
def ids(self) -> List[int]:
"""Return all unique ids"""
if "_id" not in self.columns:
return []
return self._id.unique().tolist()
@property
def amount(self) -> pd.Series:
"""Flattend as one Trade"""
a = self["amount"]
return a.groupby(a.index).sum().sort_index()
def get_trade(self, trade_id: int) -> pd.Series:
"""Get trade.
Args:
trade_id: Id for the trade.
Trades of the same id are recognized as one individual trade.
Returns:
Trade of pd.Series.
"""
return self.loc[self._id == trade_id, "amount"]
def get_any(self, key: Any) -> Type["Trades"]:
"""Filter trade which match conditions at least one element.
Args:
key: Same arguments with pd.DataFrame.__getitem__.
Returns:
Trades.
"""
filterd_ids = self[key].ids
trades = [self.get_trade(i) for i in filterd_ids]
return make_trades(self.symbol, trades, filterd_ids)
def get_all(self, key: Any) -> Type["Trades"]:
"""Filter trade which match conditions for all elements.
Args:
key: Same arguments with pd.DataFrame.__getitem__.
Returns:
Trades.
"""
filterd = self[key]
ids = []
trades = []
for i in filterd.ids:
t = self.get_trade(i)
if t.equals(filterd.get_trade(i)):
ids.append(i)
trades.append(t)
return make_trades(self.symbol, trades, ids)
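    # Example (added note): trades.get_any(trades["amount"] > 0) keeps every trade that has at
    # least one positive transaction, while get_all(trades["amount"] > 0) keeps only trades whose
    # transactions are all positive.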
def reset_cols(self) -> None:
"""Keep only _target_columns"""
for col in self.columns:
if col not in self._target_columns:
self.drop(col, axis=1, inplace=True)
@property
def _constructor(self) -> Type["Trades"]:
return Trades
def _sum(a: pd.Series) -> float:
return a.sum() if len(a) != 0 else 0
def _sort(t: Trades) -> Trades:
t["ind"] = t.index
t = t.sort_values(by=["ind", "_id"])
t.reset_cols()
return t
def make_trade(transactions: Iterable[Transaction]) -> pd.Series:
"""Create Trade instance from transacsions"""
index = [t.timestamp for t in transactions]
data = [t.amount for t in transactions]
sr = pd.Series(index=index, data=data, name="amount")
return sr.groupby(sr.index).sum().sort_index()
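# Example (added, illustrative): two transactions at the same timestamp are summed into one row.
#   t0 = pd.Timestamp("2020-01-01 09:00")          # arbitrary example timestamp
#   make_trade([Transaction(t0, 1.0), Transaction(t0, -0.4)])   # -> amount 0.6 at t0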
def from_dataframe(df: pd.DataFrame, symbol: str) -> Trades:
"""Create a Trades instance out of a DataFrame object
Args:
df: DataFrame
symbol: symbol to query
Returns:
Trades
"""
trades = Trades(df.copy())
trades.symbol = symbol
trades.reset_cols()
return _sort(trades)
def concat(trades: List[Trades], refresh_id: bool = False) -> Trades:
"""Concatenate some of Trades
Args:
trades: List of trades
refresh_id: Set true to re-assign ids for trades. Default: False
Returns:
Trades
"""
if refresh_id:
id_offset = 0
list_of_trades = []
for a_trades in trades:
a_trades = a_trades.copy()
a_trades._id += id_offset
id_offset = a_trades._id.max() + 1
list_of_trades.append(a_trades)
trades = list_of_trades
t = Trades(
|
pd.concat(trades, axis=0)
|
pandas.concat
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# from QUANTAXIS.QAData.data_fq import QA_data_make_qfq, QA_data_make_hfq
# Data interface based on pytdx; the advantage is that TDX (通达信) market data can be accessed from linux/mac
# For details see rainx's pytdx (https://github.com/rainx/pytdx)
#
import datetime
import numpy as np
import pandas as pd
from pytdx.exhq import TdxExHq_API
from pytdx.hq import TdxHq_API
from retrying import retry
from QUANTAXIS.QAFetch.base import _select_market_code, _select_index_code, _select_type, _select_bond_market_code
from QUANTAXIS.QAUtil import (QA_Setting, QA_util_date_stamp, QA_util_code_tostr,
QA_util_date_str2int, QA_util_date_valid,
QA_util_get_real_date, QA_util_get_real_datelist,
QA_util_future_to_realdatetime, QA_util_tdxtimestamp,
QA_util_future_to_tradedatetime,
QA_util_get_trade_gap, QA_util_log_info,
QA_util_time_stamp, QA_util_web_ping,
exclude_from_stock_ip_list, future_ip_list,
stock_ip_list, trade_date_sse)
from QUANTAXIS.QAUtil.QASetting import QASETTING
from QUANTAXIS.QASetting.QALocalize import log_path
from QUANTAXIS.QAUtil import Parallelism
from QUANTAXIS.QAUtil.QACache import QA_util_cache
def init_fetcher():
"""初始化获取
"""
pass
def ping(ip, port=7709, type_='stock'):
api = TdxHq_API()
apix = TdxExHq_API()
__time1 = datetime.datetime.now()
try:
if type_ in ['stock']:
with api.connect(ip, port, time_out=0.7):
res = api.get_security_list(0, 1)
if res is not None:
if len(api.get_security_list(0, 1)) > 800:
return datetime.datetime.now() - __time1
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
elif type_ in ['future']:
with apix.connect(ip, port, time_out=0.7):
res = apix.get_instrument_count()
if res is not None:
if res > 20000:
return datetime.datetime.now() - __time1
else:
                        print('Bad FUTUREIP RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
else:
                    print('Bad FUTUREIP RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
except Exception as e:
if isinstance(e, TypeError):
print(e)
print('Tushare内置的pytdx版本和QUANTAXIS使用的pytdx 版本不同, 请重新安装pytdx以解决此问题')
print('pip uninstall pytdx')
print('pip install pytdx')
else:
print('BAD RESPONSE {}'.format(ip))
return datetime.timedelta(9, 9, 0)
def select_best_ip():
QA_util_log_info('Selecting the Best Server IP of TDX')
    # remove the excluded ips
import json
null = None
qasetting = QASETTING
exclude_ip = {'ip': '1.1.1.1', 'port': 7709}
default_ip = {'stock': {'ip': None, 'port': None},
'future': {'ip': None, 'port': None}}
alist = []
alist.append(exclude_ip)
ipexclude = qasetting.get_config(
section='IPLIST', option='exclude', default_value=alist)
exclude_from_stock_ip_list(ipexclude)
ipdefault = qasetting.get_config(
section='IPLIST', option='default', default_value=default_ip)
ipdefault = eval(ipdefault) if isinstance(ipdefault, str) else ipdefault
assert isinstance(ipdefault, dict)
if ipdefault['stock']['ip'] == None:
best_stock_ip = get_ip_list_by_ping(stock_ip_list)
else:
if ping(ipdefault['stock']['ip'], ipdefault['stock']['port'],
'stock') < datetime.timedelta(0, 1):
print('USING DEFAULT STOCK IP')
best_stock_ip = ipdefault['stock']
else:
print('DEFAULT STOCK IP is BAD, RETESTING')
best_stock_ip = get_ip_list_by_ping(stock_ip_list)
if ipdefault['future']['ip'] == None:
best_future_ip = get_ip_list_by_ping(future_ip_list, _type='future')
else:
if ping(ipdefault['future']['ip'], ipdefault['future']['port'],
'future') < datetime.timedelta(0, 1):
print('USING DEFAULT FUTURE IP')
best_future_ip = ipdefault['future']
else:
print('DEFAULT FUTURE IP {} is BAD, RETESTING'.format(ipdefault))
best_future_ip = get_ip_list_by_ping(future_ip_list,
_type='future')
ipbest = {'stock': best_stock_ip, 'future': best_future_ip}
qasetting.set_config(
section='IPLIST', option='default', default_value=ipbest)
QA_util_log_info(
'=== The BEST SERVER ===\n stock_ip {} future_ip {}'.format(
best_stock_ip['ip'], best_future_ip['ip']))
return ipbest
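# (added note) select_best_ip() returns and persists a dict of the form
# {'stock': {'ip': ..., 'port': ...}, 'future': {'ip': ..., 'port': ...}}, which the
# get_*market_ip helpers below fall back on when no ip/port is passed explicitly.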
def get_ip_list_by_ping(ip_list=[], _type='stock'):
best_ip = get_ip_list_by_multi_process_ping(ip_list, 1, _type)
return best_ip[0]
def get_ip_list_by_multi_process_ping(ip_list=[], n=0, _type='stock',
cache_age=86400):
    ''' Return the list of usable ips sorted by ping time
    2019-04-09 added cache_age, the cache time per _type
    2019-03-31 removed the filename parameter
    :param ip_list: list of ips
    :param n: maximum number of ips to return; if fewer usable ips than n are available, all of them are returned; n=0 returns all usable ips
    :param _type: ip type
    :param cache_age: cache time (seconds) for this ip type, defaults to one day (86400 seconds)
    :return: list of ips that respond to ping
    '''
cache = QA_util_cache()
results = cache.get(_type)
if results:
# read the data from cache
print('loading ip list from {} cache.'.format(_type))
else:
ips = [(x['ip'], x['port'], _type) for x in ip_list]
ps = Parallelism()
ps.run(ping, ips)
data = list(ps.get_results())
results = []
for i in range(len(data)):
            # drop the entries that could not be pinged
if data[i] < datetime.timedelta(0, 9, 0):
results.append((data[i], ip_list[i]))
        # sort by ping time in ascending order
results = [x[1] for x in sorted(results, key=lambda x: x[0])]
if _type:
# store the data as binary data stream
cache.set(_type, results, age=cache_age)
print('saving ip list to {} cache {}'.format(_type, len(results)))
if len(results) > 0:
if n == 0 and len(results) > 0:
return results
else:
return results[:n]
else:
print('ALL IP PING TIMEOUT!')
return [{'ip': None, 'port': None}]
global best_ip
best_ip = {
'stock': {
'ip': None, 'port': None
},
'future': {
'ip': None, 'port': None
}
}
# return 1 if sh, 0 if sz
def get_extensionmarket_ip(ip, port):
global best_ip
if ip is None and port is None and best_ip['future']['ip'] is None and \
best_ip['future']['port'] is None:
best_ip = select_best_ip()
ip = best_ip['future']['ip']
port = best_ip['future']['port']
elif ip is None and port is None and \
best_ip['future']['ip'] is not None and \
best_ip['future']['port'] is not None:
ip = best_ip['future']['ip']
port = best_ip['future']['port']
else:
pass
return ip, port
def get_mainmarket_ip(ip, port):
"""[summary]
Arguments:
ip {[type]} -- [description]
port {[type]} -- [description]
Returns:
[type] -- [description]
"""
global best_ip
if ip is None and port is None and best_ip['stock']['ip'] is None and \
best_ip['stock']['port'] is None:
best_ip = select_best_ip()
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
elif ip is None and port is None and \
best_ip['stock']['ip'] is not None and \
best_ip['stock']['port'] is not None:
ip = best_ip['stock']['ip']
port = best_ip['stock']['port']
else:
pass
return ip, port
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_security_bars(code, _type, lens, ip=None, port=None):
"""按bar长度推算数据
Arguments:
code {[type]} -- [description]
_type {[type]} -- [description]
lens {[type]} -- [description]
Keyword Arguments:
ip {[type]} -- [description] (default: {best_ip})
port {[type]} -- [description] (default: {7709})
Returns:
[type] -- [description]
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat([api.to_df(
api.get_security_bars(_select_type(_type), _select_market_code(
code), code, (i - 1) * 800, 800)) for i in
range(1, int(lens / 800) + 2)], axis=0, sort=False)
data = data \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=_type, code=str(code)) \
.set_index('datetime', drop=False, inplace=False).tail(lens)
if data is not None:
return data
else:
return None
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_day(code, start_date, end_date, if_fq='00',
frequence='day', ip=None, port=None):
"""获取日线及以上级别的数据
Arguments:
code {str:6} -- code 是一个单独的code 6位长度的str
start_date {str:10} -- 10位长度的日期 比如'2017-01-01'
end_date {str:10} -- 10位长度的日期 比如'2018-01-01'
Keyword Arguments:
if_fq {str} -- '00'/'bfq' -- 不复权 '01'/'qfq' -- 前复权 '02'/'hfq' -- 后复权 '03'/'ddqfq' -- 定点前复权 '04'/'ddhfq' --定点后复权
frequency {str} -- day/week/month/quarter/year 也可以是简写 D/W/M/Q/Y
ip {str} -- [description] (default: None) ip可以通过select_best_ip()函数重新获取
port {int} -- [description] (default: {None})
Returns:
pd.DataFrame/None -- 返回的是dataframe,如果出错比如只获取了一天,而当天停牌,返回None
Exception:
如果出现网络问题/服务器拒绝, 会出现socket:time out 尝试再次获取/更换ip即可, 本函数不做处理
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
try:
with api.connect(ip, port, time_out=0.7):
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['quarter', 'Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
start_date = str(start_date)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
data = pd.concat([api.to_df(
api.get_security_bars(frequence, _select_market_code(
code), code, (int(lens / 800) - i) * 800, 800)) for i in
range(int(lens / 800) + 1)], axis=0, sort=False)
            # Note: if only one day was requested and the stock was suspended that day, None is returned directly
if len(data) < 1:
return None
data = data[data['open'] != 0]
data = data.assign(
date=data['datetime'].apply(lambda x: str(x[0:10])),
code=str(code),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(str(x)[0:10]))) \
.set_index('date', drop=False, inplace=False)
end_date = str(end_date)[0:10]
data = data.drop(
['year', 'month', 'day', 'hour', 'minute', 'datetime'],
axis=1)[
start_date:end_date]
if if_fq in ['00', 'bfq']:
return data
else:
print('CURRENTLY NOT SUPPORT REALTIME FUQUAN')
return None
# xdxr = QA_fetch_get_stock_xdxr(code)
# if if_fq in ['01','qfq']:
# return QA_data_make_qfq(data,xdxr)
# elif if_fq in ['02','hfq']:
# return QA_data_make_hfq(data,xdxr)
except Exception as e:
if isinstance(e, TypeError):
print('Tushare内置的pytdx版本和QUANTAXIS使用的pytdx 版本不同, 请重新安装pytdx以解决此问题')
print('pip uninstall pytdx')
print('pip install pytdx')
else:
print(e)
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_min(code, start, end, frequence='1min', ip=None,
port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
type_ = ''
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens
if lens > 20800:
lens = 20800
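    # (added note) lens is the trading-day gap scaled to bars per day for the chosen frequency
    # (a 4-hour A-share session has 48 five-minute bars, 240 one-minute bars, and so on),
    # capped at 20800 bars per request.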
with api.connect(ip, port):
data = pd.concat(
[api.to_df(
api.get_security_bars(
frequence, _select_market_code(
str(code)),
str(code),
(int(lens / 800) - i) * 800, 800)) for i
in range(int(lens / 800) + 1)], axis=0, sort=False)
data = data \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code),
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_).set_index('datetime', drop=False,
inplace=False)[start:end]
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_latest(code, frequence='day', ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
code = [code] if isinstance(code, str) else code
api = TdxHq_API(multithread=True)
if frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif frequence in ['5', '5m', '5min', 'five']:
frequence = 0
elif frequence in ['1', '1m', '1min', 'one']:
frequence = 8
elif frequence in ['15', '15m', '15min', 'fifteen']:
frequence = 1
elif frequence in ['30', '30m', '30min', 'half']:
frequence = 2
elif frequence in ['60', '60m', '60min', '1h']:
frequence = 3
else:
frequence = 9
with api.connect(ip, port):
data = pd.concat([api.to_df(api.get_security_bars(
frequence, _select_market_code(item), item, 0, 1)).assign(
code=item) for item in code], axis=0, sort=False)
return data \
.assign(date=pd.to_datetime(data['datetime']
.apply(lambda x: x[0:10])).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
date_stamp=data['datetime']
.apply(lambda x: QA_util_date_stamp(str(x[0:10])))) \
.set_index('date', drop=False) \
.drop(['year', 'month', 'day', 'hour', 'minute', 'datetime'],
axis=1)
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_realtime(code=['000001', '000002'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> speed of price change
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price*100
    # vol: total volume  cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume  b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
return data.set_index(['datetime', 'code'])
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_realtime(code=['000001'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> speed of price change
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price*100
    # vol: total volume  cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume  b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_index_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
return data.set_index(['datetime', 'code'])
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_realtime(code=['010107'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
    # reversed_bytes9 --> speed of price change
    # active1, active2 --> activity level
    # reversed_bytes1 --> -price*100
    # vol: total volume  cur_vol: current volume
    # amount: total turnover
    # s_vol: inner (sell-side) volume  b_vol: outer (buy-side) volume
    # reversed_bytes2: market
    # # reversed_bytes0: time
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_bond_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data = __data.assign(datetime=datetime.datetime.now(
), servertime=__data['reversed_bytes0'].apply(QA_util_tdxtimestamp))
# __data['rev']
data = __data[
['datetime', 'servertime', 'active1', 'active2', 'last_close', 'code', 'open',
'high', 'low', 'price', 'cur_vol',
's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1',
'ask2', 'ask_vol2',
'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3',
'ask4',
'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5',
'bid_vol5']]
data = data.assign(last_close=data.last_close/10, open=data.open/10, high=data.high/10, low=data.low/10,
price= data.price/10,
ask1=data.ask1/10, ask2=data.ask2/10, ask3=data.ask3/10, ask4=data.ask4/10, ask5=data.ask5/10,
bid1=data.bid1/10, bid2=data.bid2/10, bid3=data.bid3/10, bid4=data.bid4/10, bid5=data.bid5/10)
return data.set_index(['datetime', 'code'])
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_depth_market_data(code=['000001', '000002'], ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
__data = pd.DataFrame()
with api.connect(ip, port):
code = [code] if isinstance(code, str) else code
for id_ in range(int(len(code) / 80) + 1):
__data = __data.append(api.to_df(api.get_security_quotes(
[(_select_market_code(x), x) for x in
code[80 * id_:80 * (id_ + 1)]])))
__data['datetime'] = datetime.datetime.now()
data = __data
# data = __data[['datetime', 'active1', 'active2', 'last_close', 'code', 'open', 'high', 'low', 'price', 'cur_vol',
# 's_vol', 'b_vol', 'vol', 'ask1', 'ask_vol1', 'bid1', 'bid_vol1', 'ask2', 'ask_vol2',
# 'bid2', 'bid_vol2', 'ask3', 'ask_vol3', 'bid3', 'bid_vol3', 'ask4',
# 'ask_vol4', 'bid4', 'bid_vol4', 'ask5', 'ask_vol5', 'bid5', 'bid_vol5']]
return data.set_index(['datetime', 'code'], drop=False, inplace=False)
'''
Shanghai market (沪市)
010xxx treasury bonds
001××× treasury bond spot;
110××× 120××× corporate bonds;
129××× 100××× convertible bonds;
201××× treasury bond repos;
310××× treasury bond futures;
500××× 550××× funds;
600××× A shares;
700××× rights issues;
710××× transferred allotted shares;
701××× re-allotment of transferred allotted shares;
711××× re-transfer of transferred allotted shares;
720××× dividends;
730××× new share subscription;
735××× new fund subscription;
737××× new share placement;
900××× B shares.
Shenzhen market (深市)
1st digit  2nd digit  digits 3-6  meaning
0 0 XXXX A-share security
0 3 XXXX A-share A2 warrant
0 7 XXXX A-share additional issue
0 8 XXXX A-share A1 warrant
0 9 XXXX A-share transferred allotment
1 0 XXXX treasury bond spot
1 1 XXXX bond
1 2 XXXX convertible bond
1 3 XXXX treasury bond repo
1 7 XXXX original investment fund
1 8 XXXX securities investment fund
2 0 XXXX B-share security
2 7 XXXX B-share additional issue
2 8 XXXX B-share warrant
3 0 XXXX ChiNext (创业板) security
3 7 XXXX ChiNext additional issue
3 8 XXXX ChiNext warrant
3 9 XXXX composite index / constituent index
Shenzhen A-share codes start with 000, e.g. 顺鑫农业: 000860.
B-share codes start with 200, e.g. 深中冠B股: 200018.
SME board codes start with 002, e.g. 东华合创: 002065.
ChiNext codes start with 300, e.g. 探路者: 300005
For more details see issue https://github.com/QUANTAXIS/QUANTAXIS/issues/158
@yutiansut
'''
def for_sz(code):
"""深市代码分类
Arguments:
code {[type]} -- [description]
Returns:
[type] -- [description]
"""
if str(code)[0:2] in ['00', '30', '02']:
return 'stock_cn'
elif str(code)[0:2] in ['39']:
return 'index_cn'
elif str(code)[0:2] in ['15']:
return 'etf_cn'
elif str(code)[0:3] in ['101', '104', '105', '106', '107', '108', '109',
'111', '112', '114', '115', '116', '117', '118', '119',
'123', '127', '128',
'131', '139', ]:
        # 10xxxx treasury bond spot
        # 11xxxx bonds
        # 12xxxx convertible bonds
        # 123
        # 127
        # 12xxxx treasury bond repos
return 'bond_cn'
elif str(code)[0:2] in ['20']:
return 'stockB_cn'
else:
return 'undefined'
def for_sh(code):
if str(code)[0] == '6':
return 'stock_cn'
elif str(code)[0:3] in ['000', '880']:
return 'index_cn'
elif str(code)[0:2] == '51':
return 'etf_cn'
    # 110××× 120××× corporate bonds;
    # 129××× 100××× convertible bonds;
    # 113: convertible bonds of A shares; 132
elif str(code)[0:3] in ['102', '110', '113', '120', '122', '124',
'130', '132', '133', '134', '135', '136',
'140', '141', '143', '144', '147', '148']:
return 'bond_cn'
else:
return 'undefined'
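# (added note) Classification examples for the helpers above: for_sz('000001') and for_sh('600000')
# both return 'stock_cn', for_sz('399001') returns 'index_cn', and for_sh('510050') returns 'etf_cn'.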
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_list(type_='stock', ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
# if type_ in ['stock', 'gp']:
# res = pd.read_csv('http://data.yutiansut.com/stock_code.csv')
# return res.assign(code=res.code.apply(lambda x: QA_util_code_tostr(x)))
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
# data.code = data.code.apply(int)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
if type_ in ['stock', 'gp']:
# res = pd.read_csv('http://data.yutiansut.com/stock_code.csv')
# return res.assign(code=res.code.apply(lambda x: QA_util_code_tostr(x)))
return pd.concat([sz, sh], sort=False).query(
'sec=="stock_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
elif type_ in ['index', 'zs']:
return pd.concat([sz, sh], sort=False).query(
'sec=="index_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
# .assign(szm=data['name'].apply(lambda x: ''.join([y[0] for y in lazy_pinyin(x)])))\
# .assign(quanpin=data['name'].apply(lambda x: ''.join(lazy_pinyin(x))))
elif type_ in ['etf', 'ETF']:
return pd.concat([sz, sh], sort=False).query(
'sec=="etf_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
else:
return data.assign(
code=data['code'].apply(lambda x: str(x))).assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
# .assign(szm=data['name'].apply(lambda x: ''.join([y[0] for y in lazy_pinyin(x)])))\
# .assign(quanpin=data['name'].apply(lambda x: ''.join(lazy_pinyin(x))))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_list(ip=None, port=None):
"""获取指数列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
return pd.concat([sz, sh], sort=False).query(
'sec=="index_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_list(ip=None, port=None):
"""bond
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat(
[pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(
sse='sz' if j == 0 else 'sh') for i in
range(int(api.get_security_count(j) / 1000) + 1)], axis=0, sort=False) for
j
in range(2)], axis=0, sort=False)
# data.code = data.code.apply(int)
data = data.loc[:,['code','volunit','decimal_point','name','pre_close','sse']].set_index(
['code', 'sse'], drop=False)
sz = data.query('sse=="sz"')
sh = data.query('sse=="sh"')
sz = sz.assign(sec=sz.code.apply(for_sz))
sh = sh.assign(sec=sh.code.apply(for_sh))
return pd.concat([sz, sh], sort=False).query('sec=="bond_cn"').sort_index().assign(
name=data['name'].apply(lambda x: str(x)[0:6]))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_day(code, start_date, end_date, frequence='day', ip=None,
port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
with api.connect(ip, port):
start_date = str(start_date)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
code = str(code)
data = pd.concat([api.to_df(api.get_security_bars(
frequence, _select_bond_market_code(code),
code, (int(lens / 800) - i) * 800, 800))
for i in range(int(lens / 800) + 1)], axis=0, sort=False)
data = data.assign(
date=data['datetime'].apply(lambda x: str(x[0:10]))).assign(
code=str(code)) \
.assign(date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(str(x)[0:10]))) \
.set_index('date', drop=False, inplace=False) \
.assign(code=code) \
.drop(['year', 'month', 'day', 'hour',
'minute', 'datetime'], axis=1)[start_date:end_date]
return data.assign(date=data['date'].apply(lambda x: str(x)[0:10]))
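# Editor's note: hedged usage sketch (added). The bond code and date range are illustrative;
# frequence accepts the aliases handled above ('day', 'w', 'month', 'Q', 'y').
def _example_fetch_bond_day():
    bars = QA_fetch_get_bond_day('110038', '2020-01-01', '2020-06-30', frequence='day')
    print(bars[['code', 'date']].tail())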
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_bond_min(code, start, end, frequence='1min', ip=None,
port=None):
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens
if lens > 20800:
        lens = 20800
code = str(code)
with api.connect(ip, port):
data = pd.concat(
[api.to_df(
api.get_security_bars(
frequence, _select_bond_market_code(
str(code)),
str(code),
(int(lens / 800) - i) * 800, 800)) for i
in range(int(lens / 800) + 1)], axis=0, sort=False)
#print(data)
data = data \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code),
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_).set_index('datetime', drop=False,
inplace=False)[start:end]
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_day(code, start_date, end_date, frequence='day',
ip=None, port=None):
"""指数日线
1- sh
0 -sz
Arguments:
code {[type]} -- [description]
start_date {[type]} -- [description]
end_date {[type]} -- [description]
Keyword Arguments:
frequence {str} -- [description] (default: {'day'})
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
Returns:
[type] -- [description]
"""
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
if frequence in ['day', 'd', 'D', 'DAY', 'Day']:
frequence = 9
elif frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
with api.connect(ip, port):
start_date = str(start_date)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
if str(code)[0] in ['5', '1']: # ETF
data = pd.concat([api.to_df(api.get_security_bars(
frequence, 1 if str(code)[0] in ['0', '8', '9', '5'] else 0,
code, (int(lens / 800) - i) * 800, 800))
for i in range(int(lens / 800) + 1)], axis=0, sort=False)
else:
data = pd.concat([api.to_df(api.get_index_bars(
frequence, 1 if str(code)[0] in ['0', '8', '9', '5'] else 0,
code, (int(lens / 800) - i) * 800, 800))
for i in range(int(lens / 800) + 1)], axis=0, sort=False)
data = data.assign(
date=data['datetime'].apply(lambda x: str(x[0:10]))).assign(
code=str(code)) \
.assign(date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(str(x)[0:10]))) \
.set_index('date', drop=False, inplace=False) \
.assign(code=code) \
.drop(['year', 'month', 'day', 'hour',
'minute', 'datetime'], axis=1)
data = data.loc[start_date:end_date]
return data.assign(date=data['date'].apply(lambda x: str(x)[0:10]))
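# Editor's note: hedged usage sketch (added). '000001' is the Shanghai Composite index here;
# codes starting with '5' or '1' are treated as ETFs and routed through get_security_bars above.
def _example_fetch_index_day():
    bars = QA_fetch_get_index_day('000001', '2020-01-01', '2020-03-31', frequence='day')
    print(bars[['code', 'date']].tail())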
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_min(code, start, end, frequence='1min', ip=None,
port=None):
'指数分钟线'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
type_ = ''
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens
if lens > 20800:
lens = 20800
with api.connect(ip, port):
if str(code)[0] in ['5', '1']: # ETF
data = pd.concat([api.to_df(api.get_security_bars(
frequence, 1 if str(code)[0] in ['0', '8', '9', '5'] else 0,
code, (int(lens / 800) - i) * 800, 800))
for i in range(int(lens / 800) + 1)], axis=0, sort=False)
else:
data = pd.concat([api.to_df(api.get_index_bars(
frequence, 1 if str(code)[0] in ['0', '8', '9', '5'] else 0,
code, (int(lens / 800) - i) * 800, 800))
for i in range(int(lens / 800) + 1)], axis=0, sort=False)
data = data \
.assign(datetime=pd.to_datetime(data['datetime']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code)) \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(code=code,
date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_).set_index('datetime', drop=False,
inplace=False)[start:end]
# data
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_latest(code, frequence='day', ip=None, port=None):
ip, port = get_mainmarket_ip(ip, port)
code = [code] if isinstance(code, str) else code
api = TdxHq_API(multithread=True)
if frequence in ['w', 'W', 'Week', 'week']:
frequence = 5
elif frequence in ['month', 'M', 'm', 'Month']:
frequence = 6
elif frequence in ['Q', 'Quarter', 'q']:
frequence = 10
elif frequence in ['y', 'Y', 'year', 'Year']:
frequence = 11
elif frequence in ['5', '5m', '5min', 'five']:
frequence = 0
elif frequence in ['1', '1m', '1min', 'one']:
frequence = 8
elif frequence in ['15', '15m', '15min', 'fifteen']:
frequence = 1
elif frequence in ['30', '30m', '30min', 'half']:
frequence = 2
elif frequence in ['60', '60m', '60min', '1h']:
frequence = 3
else:
frequence = 9
with api.connect(ip, port):
data = []
for item in code:
if str(item)[0] in ['5', '1']: # ETF
data.append(api.to_df(api.get_security_bars(frequence,
1 if str(item)[
0] in [
'0', '8',
'9',
'5'] else 0,
item, 0,
1)).assign(
code=item))
else:
data.append(api.to_df(api.get_index_bars(frequence,
1 if str(item)[0] in [
'0', '8', '9',
'5'] else 0, item,
0, 1)).assign(
code=item))
data = pd.concat(data, axis=0, sort=False)
return data \
.assign(date=pd.to_datetime(data['datetime']
.apply(lambda x: x[0:10])).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
date_stamp=data['datetime']
.apply(lambda x: QA_util_date_stamp(str(x[0:10])))) \
.set_index('date', drop=False) \
.drop(['year', 'month', 'day', 'hour', 'minute', 'datetime'],
axis=1)
def __QA_fetch_get_stock_transaction(code, day, retry, api):
batch_size = 2000 # 800 or 2000 ? 2000 maybe also works
data_arr = []
max_offset = 21
cur_offset = 0
type_ = 'tick'
while cur_offset <= max_offset:
one_chunk = api.get_history_transaction_data(
_select_market_code(str(code)), str(code), cur_offset * batch_size,
batch_size, QA_util_date_str2int(day))
if one_chunk is None or one_chunk == []:
break
data_arr = one_chunk + data_arr
cur_offset += 1
data_ = api.to_df(data_arr)
for _ in range(retry):
if len(data_) < 2:
return __QA_fetch_get_stock_transaction(code, day, 0, api)
else:
data_ = data_.assign(
date=day,
datetime=pd.to_datetime(data_['time'].apply(
lambda x: str(day) + ' ' + x)).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code))
data_ = data_.assign(date_stamp=data_['datetime'].apply(lambda x: QA_util_date_stamp(x)),
time_stamp=data_['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_,
order=range(len(data_.index))).set_index('datetime', drop=False)
data_['datetime'] = data_['datetime'].apply(lambda x: str(x)[0:19])
return data_
def __QA_fetch_get_index_transaction(code, day, retry, api):
batch_size = 2000 # 800 or 2000 ? 2000 maybe also works
data_arr = []
max_offset = 21
cur_offset = 0
type_ = 'tick'
while cur_offset <= max_offset:
one_chunk = api.get_history_transaction_data(
_select_index_code(str(code)), str(code), cur_offset * batch_size,
batch_size, QA_util_date_str2int(day))
if one_chunk is None or one_chunk == []:
break
data_arr = one_chunk + data_arr
cur_offset += 1
data_ = api.to_df(data_arr)
for _ in range(retry):
if len(data_) < 2:
return __QA_fetch_get_index_transaction(code, day, 0, api)
else:
data_ = data_.assign(
date=day,
datetime=pd.to_datetime(data_['time'].apply(
lambda x: str(day) + ' ' + x)).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code))
data_ = data_.assign(date_stamp=data_['datetime'].apply(lambda x: QA_util_date_stamp(x)),
time_stamp=data_['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_,
order=range(len(data_.index))).set_index('datetime', drop=False)
data_['datetime'] = data_['datetime'].apply(lambda x: str(x)[0:19])
return data_
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_transaction(code, start, end, retry=2, ip=None,
port=None):
'''
    :param code: stock code
    :param start: start date
    :param end: end date
    :param retry: number of retries
    :param ip: server IP address
    :param port: server port
:return:
'''
    'Historical tick-by-tick transactions; buyorsell: 1 = sell, 0 = buy, 2 = pre-open call auction'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
real_start, real_end = QA_util_get_real_datelist(start, end)
if real_start is None:
return None
real_id_range = []
with api.connect(ip, port):
data = pd.DataFrame()
for index_ in range(trade_date_sse.index(real_start),
trade_date_sse.index(real_end) + 1):
try:
data_ = __QA_fetch_get_stock_transaction(
code, trade_date_sse[index_], retry, api)
if len(data_) < 1:
return None
except:
QA_util_log_info(
'Wrong in Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
else:
QA_util_log_info(
'Successfully Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
data = data.append(data_)
if len(data) > 0:
return data.assign(
datetime=data['datetime'].apply(lambda x: str(x)[0:19]))
else:
return None
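# Editor's note: hedged usage sketch (added). Fetches tick-by-tick history over a date range;
# the buyorsell field is encoded as 1 = sell, 0 = buy, 2 = pre-open (see the note above).
def _example_fetch_stock_transaction():
    ticks = QA_fetch_get_stock_transaction('000001', '2020-03-02', '2020-03-03')
    if ticks is not None:
        print(ticks[['code', 'datetime']].head())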
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_index_transaction(code, start, end, retry=2, ip=None,
port=None):
'''
    :param code: index code
    :param start: start date
    :param end: end date
    :param retry: number of retries
    :param ip: server IP address
    :param port: server port
:return:
'''
    'Historical tick-by-tick transactions; buyorsell: 1 = sell, 0 = buy, 2 = pre-open call auction'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
real_start, real_end = QA_util_get_real_datelist(start, end)
if real_start is None:
return None
real_id_range = []
with api.connect(ip, port):
data = pd.DataFrame()
for index_ in range(trade_date_sse.index(real_start),
trade_date_sse.index(real_end) + 1):
try:
data_ = __QA_fetch_get_index_transaction(
code, trade_date_sse[index_], retry, api)
if len(data_) < 1:
return None
except:
QA_util_log_info(
'Wrong in Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
else:
QA_util_log_info(
'Successfully Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
data = data.append(data_)
if len(data) > 0:
return data.assign(
datetime=data['datetime'].apply(lambda x: str(x)[0:19]))
else:
return None
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_transaction_realtime(code, ip=None, port=None):
    'Real-time tick-by-tick transactions, including the call auction; buyorsell: 1 = sell, 0 = buy, 2 = pre-open'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
try:
with api.connect(ip, port):
data = pd.DataFrame()
data = pd.concat([api.to_df(api.get_transaction_data(
_select_market_code(str(code)), code, (2 - i) * 2000, 2000))
for i in range(3)], axis=0, sort=False)
if 'value' in data.columns:
data = data.drop(['value'], axis=1)
data = data.dropna()
day = datetime.date.today()
return data.assign(
date=str(day),
datetime=pd.to_datetime(data['time'].apply(
lambda x: str(day) + ' ' + str(x))).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai'),
code=str(code),
order=range(len(data.index))).set_index('datetime', drop=False,
inplace=False)
except:
return None
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_xdxr(code, ip=None, port=None):
    'Ex-dividend / ex-rights (XDXR) information'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
market_code = _select_market_code(code)
with api.connect(ip, port):
category = {
'1': '除权除息', '2': '送配股上市', '3': '非流通股上市', '4': '未知股本变动',
'5': '股本变化',
'6': '增发新股', '7': '股份回购', '8': '增发新股上市', '9': '转配股上市',
'10': '可转债上市',
'11': '扩缩股', '12': '非流通股缩股', '13': '送认购权证', '14': '送认沽权证'}
data = api.to_df(api.get_xdxr_info(market_code, code))
if len(data) >= 1:
data = data \
.assign(date=pd.to_datetime(data[['year', 'month', 'day']]).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai')) \
.drop(['year', 'month', 'day'], axis=1) \
.assign(category_meaning=data['category'].apply(
lambda x: category[str(x)])) \
.assign(code=str(code)) \
.rename(index=str, columns={'panhouliutong': 'liquidity_after',
'panqianliutong': 'liquidity_before',
'houzongguben': 'shares_after',
'qianzongguben': 'shares_before'}) \
.set_index('date', drop=False, inplace=False)
return data.assign(date=data['date'].apply(lambda x: str(x)[0:10]))
else:
return None
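# Editor's note: hedged usage sketch (added). XDXR records carry a category code that is
# translated by the dict above into category_meaning (e.g. '1' -> 除权除息).
def _example_fetch_xdxr():
    xdxr = QA_fetch_get_stock_xdxr('000001')
    if xdxr is not None:
        print(xdxr[['code', 'category_meaning', 'date']].head())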
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_info(code, ip=None, port=None):
'股票基本信息'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
market_code = _select_market_code(code)
with api.connect(ip, port):
return api.to_df(api.get_finance_info(market_code, code))
@retry(stop_max_attempt_number=3, wait_random_min=50, wait_random_max=100)
def QA_fetch_get_stock_block(ip=None, port=None):
'板块数据'
ip, port = get_mainmarket_ip(ip, port)
api = TdxHq_API()
with api.connect(ip, port):
data = pd.concat([api.to_df(
api.get_and_parse_block_info("block_gn.dat")).assign(type='gn'),
api.to_df(api.get_and_parse_block_info(
"block.dat")).assign(type='yb'),
api.to_df(api.get_and_parse_block_info(
"block_zs.dat")).assign(type='zs'),
api.to_df(api.get_and_parse_block_info(
"block_fg.dat")).assign(type='fg')], sort=False)
if len(data) > 10:
return data.assign(source='tdx').drop(['block_type', 'code_index'],
axis=1).set_index('code',
drop=False,
inplace=False).drop_duplicates()
else:
QA_util_log_info('Wrong with fetch block ')
"""
http://www.tdx.com.cn/page_46.html
market category name short_name
1 1 临时股 TP
## 期权 OPTION
4 12 郑州商品期权 OZ
5 12 大连商品期权 OD
6 12 上海商品期权 OS
7 12 中金所期权 OJ
8 12 上海股票期权 QQ
9 12 深圳股票期权 (推测)
## 汇率 EXCHANGERATE
10 4 基本汇率 FE
11 4 交叉汇率 FX
## 全球 GLOBALMARKET
37 11 全球指数(静态) FW
12 5 国际指数 WI
13 3 国际贵金属 GO
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
# 期货现货
42 3 商品指数 TI
60 3 主力期货合约 MA
28 3 郑州商品 QZ
29 3 大连商品 QD
30 3 上海期货 QS
46 11 上海黄金 SG
47 3 中金所期货 CZ
50 3 渤海商品 BH
76 3 齐鲁商品 QL
## 基金
33 8 开放式基金 FU
34 9 货币型基金 FB
35 8 招商理财产品 LC
36 9 招商货币产品 LB
56 8 阳光私募基金 TA
57 8 券商集合理财 TB
58 9 券商货币理财 TC
## 美股 USA STOCK
74 13 美国股票 US
40 11 中国概念股 CH
41 11 美股知名公司 MG
## 其他
38 10 宏观指标 HG
44 1 股转系统 SB
54 6 国债预发行 GY
62 5 中证指数 ZZ
70 5 扩展板块指数 UZ
71 2 港股通 GH
"""
"""
扩展行情
首先会初始化/存储一个
市场状况 extension_market_info
代码对应表 extension_market_list
"""
global extension_market_info
extension_market_info = None
global extension_market_list
extension_market_list = None
def QA_fetch_get_extensionmarket_count(ip=None, port=None):
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
with apix.connect(ip, port):
global extension_market_info
extension_market_info = apix.to_df(apix.get_markets())
return extension_market_info
def QA_fetch_get_extensionmarket_info(ip=None, port=None):
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
with apix.connect(ip, port):
global extension_market_info
extension_market_info = apix.to_df(apix.get_markets())
return extension_market_info
def QA_fetch_get_extensionmarket_list(ip=None, port=None):
'期货代码list'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
with apix.connect(ip, port):
num = apix.get_instrument_count()
return pd.concat([apix.to_df(
apix.get_instrument_info((int(num / 500) - i) * 500, 500))
for i in range(int(num / 500) + 1)], axis=0, sort=False).set_index('code',
drop=False)
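# Editor's note: a hedged usage sketch (added, not in the original source) showing how the
# market ids documented in the reference table above are used to slice the extension market
# list; it mirrors the query() filters used by the helper functions that follow.
def _example_query_extension_markets():
    codes = QA_fetch_get_extensionmarket_list()
    cn_futures = codes.query('market==28 or market==29 or market==30')  # Zhengzhou / Dalian / Shanghai futures
    hk_mainboard = codes.query('market==31')                            # Hong Kong main board
    print(len(cn_futures), len(hk_mainboard))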
def QA_fetch_get_future_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
42 3 商品指数 TI
60 3 主力期货合约 MA
28 3 郑州商品 QZ
29 3 大连商品 QD
30 3 上海期货(原油+贵金属) QS
47 3 中金所期货 CZ
50 3 渤海商品 BH
76 3 齐鲁商品 QL
46 11 上海黄金(伦敦金T+D) SG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==42 or market==28 or market==29 or market==30 or market==47')
def QA_fetch_get_globalindex_list(ip=None, port=None):
"""全球指数列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
37 11 全球指数(静态) FW
12 5 国际指数 WI
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==12 or market==37')
def QA_fetch_get_goods_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
42 3 商品指数 TI
60 3 主力期货合约 MA
28 3 郑州商品 QZ
29 3 大连商品 QD
30 3 上海期货(原油+贵金属) QS
47 3 中金所期货 CZ
50 3 渤海商品 BH
76 3 齐鲁商品 QL
46 11 上海黄金(伦敦金T+D) SG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==50 or market==76 or market==46')
def QA_fetch_get_globalfuture_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
14 3 伦敦金属 LM
15 3 伦敦石油 IP
16 3 纽约商品 CO
17 3 纽约石油 NY
18 3 芝加哥谷 CB
19 3 东京工业品 TO
20 3 纽约期货 NB
77 3 新加坡期货 SX
39 3 马来期货 ML
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==14 or market==15 or market==16 or market==17 or market==18 or market==19 or market==20 or market==77 or market==39')
def QA_fetch_get_hkstock_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==31 or market==48')
def QA_fetch_get_hkindex_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==27')
def QA_fetch_get_hkfund_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# 港股 HKMARKET
27 5 香港指数 FH
31 2 香港主板 KH
48 2 香港创业板 KG
49 2 香港基金 KT
43 1 B股转H股 HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==49')
def QA_fetch_get_usstock_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
## 美股 USA STOCK
74 13 美国股票 US
40 11 中国概念股 CH
41 11 美股知名公司 MG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query(
'market==74 or market==40 or market==41')
def QA_fetch_get_macroindex_list(ip=None, port=None):
"""宏观指标列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
38 10 宏观指标 HG
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==38')
def QA_fetch_get_option_all_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510050C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
# 50etf
if strName.startswith("510050"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510050C"):
putcall = '50ETF,认购期权'
elif strName.startswith("510050P"):
putcall = '50ETF,认沽期权'
else:
                putcall = "Unknown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
            # The 12th character of the contract name starts as "M" and is advanced through
            # "A" to "Z" each time the contract is adjusted: "A" marks the first adjustment,
            # "B" the second, and so on.
            # fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
                adjust = " 第2次调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
                adjust = " 第10次以上的调整,调整代码 %s" % strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
# 300etf
if strName.startswith("510300"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
if strName.startswith("510300C"):
putcall = '300ETF,认购期权'
elif strName.startswith("510300P"):
putcall = '300ETF,认沽期权'
else:
                putcall = "Unknown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
            # The 12th character of the contract name starts as "M" and is advanced through
            # "A" to "Z" each time the contract is adjusted: "A" marks the first adjustment,
            # "B" the second, and so on.
            # fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
                adjust = " 第2次调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
                adjust = " 第10次以上的调整,调整代码 %s" % strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
elif strName.startswith("SR"):
# print("SR")
# SR1903-P-6500
expireYear = strName[2:4]
expireMonth = strName[4:6]
put_or_call = strName[7:8]
if put_or_call == "P":
putcall = "白糖,认沽期权"
elif put_or_call == "C":
putcall = "白糖,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
elif strName.startswith("CU"):
# print("CU")
# print("SR")
# SR1903-P-6500
expireYear = strName[2:4]
expireMonth = strName[4:6]
put_or_call = strName[7:8]
if put_or_call == "P":
putcall = "铜,认沽期权"
elif put_or_call == "C":
putcall = "铜,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
# todo 新增期权品种 棉花,玉米, 天然橡胶
elif strName.startswith("RU"):
# print("M")
# print(strName)
##
expireYear = strName[2:4]
expireMonth = strName[4:6]
put_or_call = strName[7:8]
if put_or_call == "P":
putcall = "天然橡胶,认沽期权"
elif put_or_call == "C":
putcall = "天然橡胶,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
elif strName.startswith("CF"):
# print("M")
# print(strName)
##
expireYear = strName[2:4]
expireMonth = strName[4:6]
put_or_call = strName[7:8]
if put_or_call == "P":
putcall = "棉花,认沽期权"
elif put_or_call == "C":
putcall = "棉花,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
elif strName.startswith("M"):
# print("M")
# print(strName)
##
expireYear = strName[1:3]
expireMonth = strName[3:5]
put_or_call = strName[6:7]
if put_or_call == "P":
putcall = "豆粕,认沽期权"
elif put_or_call == "C":
putcall = "豆粕,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[8:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
elif strName.startswith("C") and strName[1] != 'F' and strName[
1] != 'U':
# print("M")
# print(strName)
##
expireYear = strName[1:3]
expireMonth = strName[3:5]
put_or_call = strName[6:7]
if put_or_call == "P":
putcall = "玉米,认沽期权"
elif put_or_call == "C":
putcall = "玉米,认购期权"
else:
                putcall = "Unknown code name : " + strName
executePrice = strName[8:]
result.loc[idx, 'meaningful_name'] = '%s,到期年月份:%s%s,行权价:%s' % (
putcall, expireYear, expireMonth, executePrice)
row = result.loc[idx]
rows.append(row)
pass
else:
result.loc[idx, 'meaningful_name'] = "未知类型合约"
row = result.loc[idx]
rows.append(row)
return rows
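# Editor's note: added clarification. The function above returns a list of pandas Series
# (one per matched contract); callers typically wrap it back into a DataFrame:
def _example_all_contracts_frame():
    rows = QA_fetch_get_option_all_contract_time_to_market()
    contracts = pd.DataFrame(rows)
    print(contracts[['code', 'name', 'meaningful_name']].head())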
###############################################################
# 期权合约分类
###############################################################
def QA_fetch_get_option_list(ip=None, port=None):
"""期权列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
## 期权 OPTION
1 12 临时期权(主要是50ETF)
4 12 郑州商品期权 OZ
5 12 大连商品期权 OD
6 12 上海商品期权 OS
7 12 中金所期权 OJ
8 12 上海股票期权 QQ
9 12 深圳股票期权 (推测)
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('category==12 and market!=1')
###############################################################
# 期权合约分类
# 50ETF
# 棉花
# 天然橡胶
# 铜
# 玉米
# 豆粕
# 白糖
# 红枣
###############################################################
# 50ETF
###############################################################
def QA_fetch_get_option_50etf_list():
'''
#🛠todo 获取50ETF期权合约的列表。
:return: dataframe
'''
result = QA_fetch_get_option_list('tdx')
result = result[result.name.str.startswith("510050")]
name = result.name.str
result = result.assign(putcall=name[6:7], expireMonth=name[7:8].replace({'A':'10','B':'11','C':'12'}), adjust=name[8:9], price=name[9:])
def __meaningful_name(df):
putcall = {'C':'认购期权', 'P':'认沽期权'}
adjust={'M':'未调整','A':'第1次调整','B':'第2次调整','C':'第3次调整','D':'第4次调整','E':'第5次调整','F':'第6次调整','G':'第7次调整','H':'第8次调整','I':'第9次调整','J':'第10次调整'}
return '%s,%s,到期月份:%s月,%s,行权价:%s' % ('50ETF', putcall.get(df.putcall, '错误编码'), df.expireMonth, adjust.get(df.adjust, '第10次以上的调整,调整代码 %s' % df.adjust), df.price)
result = result.assign(meaningful_name=result.apply(__meaningful_name, axis=1))
return result
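# Editor's note: hedged usage sketch (added). The vectorised assign() above decodes each 50ETF
# contract name into putcall / expireMonth / adjust / price and a human-readable meaningful_name.
def _example_option_50etf_list():
    opts = QA_fetch_get_option_50etf_list()
    print(opts[['code', 'name', 'meaningful_name']].head())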
def QA_fetch_get_option_50etf_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
rows = []
for _, row in QA_fetch_get_option_50etf_list().iterrows():
rows.append(row)
return rows
def QA_fetch_get_option_300etf_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
fix here :
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
result['meaningful_name'] = None
C:\work_new\QUANTAXIS\QUANTAXIS\QAFetch\QATdx.py:1468: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] # 10001215
strName = result.loc[idx, 'name'] # 510300C9M03200
strDesc = result.loc[idx, 'desc'] # 10001215
if strName.startswith("510300"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
            if strName.startswith("510300C"):
                putcall = '300ETF,认购期权'
            elif strName.startswith("510300P"):
                putcall = '300ETF,认沽期权'
            else:
                putcall = "Unknown code name : " + strName
expireMonth = strName[7:8]
if expireMonth == 'A':
expireMonth = "10月"
elif expireMonth == 'B':
expireMonth = "11月"
elif expireMonth == 'C':
expireMonth = "12月"
else:
expireMonth = expireMonth + '月'
            # The 12th character of the contract name starts as "M" and is advanced through
            # "A" to "Z" each time the contract is adjusted: "A" marks the first adjustment,
            # "B" the second, and so on.
            # fix here : M ??
if strName[8:9] == "M":
adjust = "未调整"
elif strName[8:9] == 'A':
adjust = " 第1次调整"
elif strName[8:9] == 'B':
                adjust = " 第2次调整"
elif strName[8:9] == 'C':
adjust = " 第3次调整"
elif strName[8:9] == 'D':
adjust = " 第4次调整"
elif strName[8:9] == 'E':
adjust = " 第5次调整"
elif strName[8:9] == 'F':
adjust = " 第6次调整"
elif strName[8:9] == 'G':
adjust = " 第7次调整"
elif strName[8:9] == 'H':
adjust = " 第8次调整"
elif strName[8:9] == 'I':
adjust = " 第9次调整"
elif strName[8:9] == 'J':
adjust = " 第10次调整"
else:
                adjust = " 第10次以上的调整,调整代码 %s" % strName[8:9]
executePrice = strName[9:]
result.loc[idx, 'meaningful_name'] = '%s,到期月份:%s,%s,行权价:%s' % (
putcall, expireMonth, adjust, executePrice)
row = result.loc[idx]
rows.append(row)
return rows
###############################################################
# 棉花
###############################################################
def QA_fetch_get_commodity_option_CF_contract_time_to_market():
'''
    Cotton options: contract names start with CF (Zhengzhou Commodity Exchange).
    For reference: CU = copper options (SHFE), M = soybean meal (DCE), SR = white sugar (CZCE).
    Note from testing: quotes can be unstable -- possibly an issue with the TDX server IP.
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("CF"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
pass
###############################################################
# 天然橡胶
###############################################################
def QA_fetch_get_commodity_option_RU_contract_time_to_market():
'''
    Natural rubber options: contract names start with RU (Shanghai Futures Exchange).
    For reference: CU = copper options (SHFE), M = soybean meal (DCE), SR = white sugar (CZCE).
    Note from testing: quotes can be unstable -- possibly an issue with the TDX server IP.
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("RU"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
pass
###############################################################
# 玉米
# Filter contracts whose names start with C
###############################################################
def QA_fetch_get_commodity_option_C_contract_time_to_market():
'''
    Corn options: contract names start with C (Dalian Commodity Exchange), excluding the CF and CU prefixes.
    For reference: CU = copper options (SHFE), M = soybean meal (DCE), SR = white sugar (CZCE).
    Note from testing: quotes can be unstable -- possibly an issue with the TDX server IP.
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("C") and strName[1] != 'F' and strName[1] != 'U':
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
pass
###############################################################
# 铜
###############################################################
def QA_fetch_get_commodity_option_CU_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("CU"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
###############################################################
# 金
###############################################################
def QA_fetch_get_commodity_option_AU_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("AU"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
###############################################################
# al 铝
###############################################################
def QA_fetch_get_commodity_option_AL_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx会 connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("AL"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
###############################################################
# 豆粕
###############################################################
def QA_fetch_get_commodity_option_M_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
铜期权 CU 开头 上期证
豆粕 M开头 大商所
白糖 SR开头 郑商所
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("M"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
###############################################################
# 白糖
###############################################################
def QA_fetch_get_commodity_option_SR_contract_time_to_market():
'''
#🛠todo 获取期权合约的上市日期 ? 暂时没有。
:return: list Series
'''
result = QA_fetch_get_option_list('tdx')
# pprint.pprint(result)
# category market code name desc code
'''
铜期权 CU 开头 上期证
豆粕 M开头 大商所
白糖 SR开头 郑商所
'''
# df = pd.DataFrame()
rows = []
result['meaningful_name'] = None
for idx in result.index:
# pprint.pprint((idx))
strCategory = result.loc[idx, "category"]
strMarket = result.loc[idx, "market"]
strCode = result.loc[idx, "code"] #
strName = result.loc[idx, 'name'] #
strDesc = result.loc[idx, 'desc'] #
# 如果同时获取, 不同的 期货交易所数据, pytdx connection close 连接中断?
# if strName.startswith("CU") or strName.startswith("M") or strName.startswith('SR'):
if strName.startswith("SR"):
# print(strCategory,' ', strMarket, ' ', strCode, ' ', strName, ' ', strDesc, )
row = result.loc[idx]
rows.append(row)
return rows
#########################################################################################
def QA_fetch_get_exchangerate_list(ip=None, port=None):
"""汇率列表
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
## 汇率 EXCHANGERATE
10 4 基本汇率 FE
11 4 交叉汇率 FX
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==10 or market==11').query(
'category==4')
def QA_fetch_get_future_day(code, start_date, end_date, frequence='day',
ip=None, port=None):
'期货数据 日线'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
start_date = str(start_date)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
with apix.connect(ip, port):
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
data = pd.concat(
[apix.to_df(apix.get_instrument_bars(
_select_type(frequence),
int(code_market.market),
str(code),
(int(lens / 700) - i) * 700, 700)) for i in
range(int(lens / 700) + 1)],
axis=0, sort=False)
try:
# 获取商品期货会报None
data = data.assign(
date=data['datetime'].apply(lambda x: str(x[0:10]))).assign(
code=str(code), date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(str(x)[0:10]))).set_index('date',
drop=False,
inplace=False)
except Exception as exp:
print("code is ", code)
            print(str(exp))
return None
return data.drop(
['year', 'month', 'day', 'hour', 'minute', 'datetime'], axis=1)[
start_date:end_date].assign(
date=data['date'].apply(lambda x: str(x)[0:10]))
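# Editor's note: hedged usage sketch (added). The contract code below is illustrative only;
# the market id is resolved from extension_market_list inside the function.
def _example_fetch_future_day():
    bars = QA_fetch_get_future_day('RBL8', '2020-01-01', '2020-03-31')
    if bars is not None:
        print(bars[['code', 'date']].tail())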
def QA_fetch_get_future_min(code, start, end, frequence='1min', ip=None,
port=None):
'期货数据 分钟线'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
type_ = ''
start_date = str(start)[0:10]
today_ = datetime.date.today()
lens = QA_util_get_trade_gap(start_date, today_)
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
if str(frequence) in ['5', '5m', '5min', 'five']:
frequence, type_ = 0, '5min'
lens = 48 * lens * 2.5
elif str(frequence) in ['1', '1m', '1min', 'one']:
frequence, type_ = 8, '1min'
lens = 240 * lens * 2.5
elif str(frequence) in ['15', '15m', '15min', 'fifteen']:
frequence, type_ = 1, '15min'
lens = 16 * lens * 2.5
elif str(frequence) in ['30', '30m', '30min', 'half']:
frequence, type_ = 2, '30min'
lens = 8 * lens * 2.5
elif str(frequence) in ['60', '60m', '60min', '1h']:
frequence, type_ = 3, '60min'
lens = 4 * lens * 2.5
if lens > 20800:
lens = 20800
# print(lens)
with apix.connect(ip, port):
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
data = pd.concat([apix.to_df(
apix.get_instrument_bars(frequence, int(code_market.market), str(
code), (int(lens / 700) - i) * 700, 700)) for i in
range(int(lens / 700) + 1)], axis=0, sort=False)
# print(data)
# print(data.datetime)
data = data \
.assign(tradetime=data['datetime'].apply(str), code=str(code),
datetime=pd.to_datetime(
data['datetime'].apply(QA_util_future_to_realdatetime, 1)).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai')) \
.drop(['year', 'month', 'day', 'hour', 'minute'], axis=1,
inplace=False) \
.assign(date=data['datetime'].apply(lambda x: str(x)[0:10]),
date_stamp=data['datetime'].apply(
lambda x: QA_util_date_stamp(x)),
time_stamp=data['datetime'].apply(
lambda x: QA_util_time_stamp(x)),
type=type_).set_index('datetime', drop=False,
inplace=False)
return data.assign(datetime=data['datetime'].apply(lambda x: str(x)))[
start:end].sort_index()
def __QA_fetch_get_future_transaction(code, day, retry, code_market, apix):
batch_size = 1800 # 800 or 2000 ? 2000 maybe also works
data_arr = []
max_offset = 40
cur_offset = 0
while cur_offset <= max_offset:
one_chunk = apix.get_history_transaction_data(
code_market, str(code), QA_util_date_str2int(day),
cur_offset * batch_size)
if one_chunk is None or one_chunk == []:
break
data_arr = one_chunk + data_arr
cur_offset += 1
data_ = apix.to_df(data_arr)
for _ in range(retry):
if len(data_) < 2:
import time
time.sleep(1)
            return __QA_fetch_get_future_transaction(code, day, 0, code_market, apix)
        else:
            return data_.assign(datetime=pd.to_datetime(data_['date']).dt.tz_localize(None).dt.tz_localize('Asia/Shanghai')).assign(
                date=str(day)) \
                .assign(code=str(code)).assign(
                order=range(len(data_.index))).set_index('datetime',
                                                         drop=False,
                                                         inplace=False)
def QA_fetch_get_future_transaction(code, start, end, retry=4, ip=None,
port=None):
    'Futures historical tick-by-tick transactions'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
real_start, real_end = QA_util_get_real_datelist(start, end)
if real_start is None:
return None
real_id_range = []
with apix.connect(ip, port):
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
data = pd.DataFrame()
for index_ in range(trade_date_sse.index(real_start),
trade_date_sse.index(real_end) + 1):
try:
data_ = __QA_fetch_get_future_transaction(
code, trade_date_sse[index_], retry,
int(code_market.market), apix)
if len(data_) < 1:
return None
except Exception as e:
print(e)
QA_util_log_info(
'Wrong in Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
else:
QA_util_log_info(
'Successfully Getting {} history transaction data in day {}'.format(
code, trade_date_sse[index_]))
data = data.append(data_)
if len(data) > 0:
return data.assign(
datetime=data['datetime'].apply(lambda x: str(x)[0:19]))
else:
return None
def QA_fetch_get_future_transaction_realtime(code, ip=None, port=None):
    'Futures real-time tick-by-tick transactions'
ip, port = get_extensionmarket_ip(ip, port)
apix = TdxExHq_API()
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
code_market = extension_market_list.query(
'code=="{}"'.format(code)).iloc[0]
with apix.connect(ip, port):
data =
|
pd.DataFrame()
|
pandas.DataFrame
|
import pandas as pd
import glob
import re
# make a list of paths to load .csv files
load_path = '/Users/jiajiazheng/Box/Suh\'s lab/GSRs/Jiajia/3-Residential Solar-plus-storage/Load_Profiles_Clean/*.csv'
load_code_list = []
for load_code in glob.glob(load_path):
load_code_list.append(load_code)
# define the parameters for right paths
suffix = "TYA.CSV_PV_gen"
PV_folder_path = "/Users/jiajiazheng/Box/Suh's lab/GSRs/Jiajia/3-Residential Solar-plus-storage/PV_Outputs_4kW/"
save_to_path = "/Users/jiajiazheng/Box/Suh's lab/GSRs/Jiajia/3-Residential Solar-plus-storage/Input_Data_2020/"
df_shape = list() # a list to contain the shapes of combined input data frames to check completeness
df_load_sum = {} # a dictionary to store the location code and total load
# iterate over the solar PV generation profiles and merge with load profiles for all 73 households
for i in range(len(load_code_list)):
df_load =
|
pd.read_csv(load_code_list[i])
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""Data1.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1nPDG8YKgABjWKTL1cRhMFXRTKRE4WnD2
"""
# Commented out IPython magic to ensure Python compatibility.
import pandas as pd
import numpy as np
import seaborn as sns
# %matplotlib inline
import matplotlib.pyplot as plt
import sklearn
import xgboost
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score,KFold,StratifiedKFold
from sklearn.metrics import confusion_matrix, accuracy_score,f1_score,precision_score,roc_auc_score
def transform_encode(data):
  """Remove outliers before encoding. We use LocalOutlierFactor: rows that fit_predict
  labels as -1 are treated as outliers and dropped. Removing them keeps training time
  reasonable and prevents extreme values from misleading the model."""
from scipy import stats
from sklearn.preprocessing import StandardScaler, LabelEncoder,OneHotEncoder
  from sklearn.neighbors import LocalOutlierFactor
  """'duration' is dropped because it leaks the target: if duration is 0 the customer
  almost certainly did not subscribe. Dropping it here also keeps it out of the
  outlier-removal step, since it is not used for training.
  """
data_1 = data.drop(['duration','y'],axis=1)
numerical_df = data_1.select_dtypes(include=['int','float'])#selecting float and int columns
  list_numerical_df_columns = list(numerical_df.columns)
  """LocalOutlierFactor is another model for detecting outliers: it compares each point's
  local density with that of its neighbours, and fit_predict marks outliers with -1."""
lof = LocalOutlierFactor()
yhat = lof.fit_predict(numerical_df) #fitting the localoutlier factor model
mask = yhat !=-1
data = data.loc[mask,:]
data_1 = data_1.loc[mask,:] #filtering out rows that are not outliers
for col in list_numerical_df_columns:
    data_1[col] = StandardScaler().fit_transform(data_1[[col]]) # scale each numeric column so the features are on a comparable range
cat_df = data_1.select_dtypes(include=['object'])
  cat_dumm = pd.get_dummies(cat_df) # one-hot encode the categorical columns as 0/1 dummies
  """Drop the original categorical columns because they have been one-hot encoded and
  are no longer needed."""
df = data_1.drop(list(cat_df.columns),axis=1)
"""concatenating the dataframe with the encoded categorical columns since we
had dropped the columns earlier"""
df =
|
pd.concat([df,cat_dumm],axis=1)
|
pandas.concat
|
from itertools import permutations
import dataset, spatial_utils
import pandas as pd, numpy as np
import torch
import os
import matplotlib.pyplot as plt, matplotlib.pylab as pylab
from sklearn.manifold import TSNE
from tqdm import tqdm
def get_phenotype_cmap():
'''
Maps cell types to distinctive colors for plotting.
Output:
phenotype_dict: Dict of phenotype:color mappings.
'''
phenotype_dict = {
'Tumor Cell': 'black',
'Unclassified': 'grey',
'Potential Artifact': 'lightgrey',
'Macrophage': 'magenta',
'B Cell': 'red',
'Monocyte': 'orange',
'Helper T Cell': 'greenyellow',
'Regulatory T Cell': 'springgreen',
'Cytotoxic T Cell': 'cyan',
'Vasculature': 'royalblue',
'Neutrophil': 'blueviolet',
'Plasma Cell': 'lightpink',
}
return phenotype_dict
def get_phenotype_cmap2():
'''palette = {"B Cells":"#0000FF","Cytotoxic T Cell":"tab:cyan", "Helper T Cells":"#000000",
"Lymphocyte Other":"#1D8348", "Macrophage":"#34FF70", "Monocyte": "#3CAB97", "Myeloid Cell Other":"#9999FF",
"NK Cells":"#ffff00","Neutrophils":"#FFB8CE","Plasma Cells":"#FF1493", "Regulatory T Cell": "#884EA0",
"Tumor":"#ff0000","Unclassified":"#CCCCCC", "Vasculature":"#D4AB84", "Neutrophil" : "#F665E9"}'''
phenotype_dict = {
"B Cell":"#0000FF",
"Cytotoxic T Cell":"tab:cyan",
"Helper T Cell":"#000000",
"Lymphocyte Other":"#1D8348",
"Macrophage":"#34FF70",
"Monocyte": "#3CAB97",
"Myeloid Cell Other":"#9999FF",
"NK Cells":"#ffff00",
"Neutrophil":"#FFB8CE",
"Plasma Cell":"#FF1493",
"Regulatory T Cell": "#884EA0",
"Tumor Cell":"#ff0000",
"Unclassified":"#CCCCCC",
"Vasculature":"#D4AB84",
#"Neutrophil" : "#F665E9"
}
return phenotype_dict
def plot_sample(sample, num_points=None):
'''
Plots the sample region along with the cmap legend.
Input:
sample: DataFrame containing the sample region to plot.
'''
# Load the phenotype cmap and retrieve sample info for the plot title
phenotype_cmap = get_phenotype_cmap()
sample_name = sample.iloc[0].Sample
pathology = sample.iloc[0].Pathology
if num_points:
sample = sample[:num_points]
fig = pylab.figure(figsize=(9,9))
ax = fig.add_subplot(111)
for phenotype in phenotype_cmap:
# If this is a tumor cell, apply a marker corresponding to its HLA1 type
if phenotype == 'Tumor Cell':
for (hla1_type, marker) in [('Negative', 'v'), ('Moderate', 'o'), ('High', '^')]:
phenotype_rows = sample[(sample.Phenotype == phenotype) & (sample.HLA1_FUNCTIONAL_threeclass == hla1_type)]
ax.scatter(x=phenotype_rows.X, y=phenotype_rows.invertY, s=9, c=phenotype_cmap[phenotype], label=phenotype+': HLA1 '+hla1_type, marker=marker)
else:
phenotype_rows = sample[sample.Phenotype == phenotype]
ax.scatter(x=phenotype_rows.X, y=phenotype_rows.invertY, s=4, c=phenotype_cmap[phenotype], label=phenotype)
plt.title('Sample: %s, %s' % (sample_name, pathology))
fig.show()
figlegend = pylab.figure(figsize=(3,4))
figlegend.legend(ax.get_legend_handles_labels()[0], ax.get_legend_handles_labels()[1])
figlegend.show()
def plot_data(xs, ts, df, n_dist=None, plot_legend=False):
phenotype_cmap = get_phenotype_cmap2()
coords = np.array(xs[:,:2])
phenotypes = np.array(xs[:,2])
if ts==0: label = 'Intact'
if ts==1: label = 'Mixed'
#if ts[b].eq(torch.tensor([0, 0, 1])).all(): label = 'Tumor'
fig = pylab.figure(figsize=(9,9))
ax = fig.add_subplot(111)
for phenotype in phenotype_cmap:
if not [x for x in df.Phenotype.cat.categories if x == phenotype]:
continue
phenotype_coords = coords[phenotypes == np.argwhere(df.Phenotype.cat.categories == phenotype)[0]]
b_cell = np.argwhere(df.Phenotype.cat.categories == 'B Cell')[0]
tumor_cell = np.argwhere(df.Phenotype.cat.categories == 'Tumor Cell')[0]
helpert_cell = np.argwhere(df.Phenotype.cat.categories == 'Helper T Cell')[0]
if n_dist:
#for neighbor_phenotype in tqdm(np.argwhere(df.Phenotype.cat.categories != phenotype)):
if phenotype == 'B Cell':
neighbor_coords = coords[phenotypes == tumor_cell]
dist_matrix = spatial_utils.build_distance_matrix(phenotype_coords, neighbor_coords, n_dist)
center_coords = phenotype_coords[np.argwhere(dist_matrix > 0)[:,0]]
c_x, c_y = [x[0] for x in center_coords], [y[1] for y in center_coords]
neighbor_coords = neighbor_coords[np.argwhere(dist_matrix > 0)[:,1]]
n_x, n_y = [x[0] for x in neighbor_coords], [y[1] for y in neighbor_coords]
plt.plot([c_x, n_x], [c_y, n_y], c='black', linewidth=0.4)
ax.scatter(x=phenotype_coords[:,0], y=phenotype_coords[:,1], s=4, c=phenotype_cmap[phenotype], label=phenotype)
plt.title(f'Sample: {df.Sample.iloc[0]}, Label: {label}')
fig.show()
if plot_legend:
figlegend = pylab.figure(figsize=(3,4))
figlegend.legend(ax.get_legend_handles_labels()[0], ax.get_legend_handles_labels()[1])
figlegend.show()
if __name__ == '__main__':
in_file='datasets/BestClassification_July2021_14Samples.tsv'
df = dataset.read_tsv(in_file)
cats = df.Sample.cat.categories
print('Number of categories: %i' % (len(cats)))
'''List of cells and HLA1s to process'''
cells = [
('Tumor Cell', 'Negative'),
('Unclassified', 'NA'),
('Macrophage', 'NA'),
('B Cell', 'NA'),
('Helper T Cell', 'NA'),
('Regulatory T Cell', 'NA'),
('Cytotoxic T Cell', 'NA'),
('Vasculature', 'NA'),
('Neutrophil', 'NA'),
('Plasma Cell', 'NA'),
]
region = ['Normal', 'Tumor', 'Interface']
#sample = dataset.sample_region(df, index=4)
#plot_sample(sample)
'''Find all unique n_perms in the set of cells'''
n_perms = 2
perms = [x for x in permutations(cells,n_perms) if x[:][1][0] != 'Tumor Cell']# and x[:][2][0] != 'Tumor Cell']
print(f'Number of permutations: {len(perms)}')
'''Arrange the output dict, which will become a CSV'''
data = {}
data['Region'] = []
for n in range(n_perms):
data['Phenotype'+str(n)]=[]
data['Instances'], data['Counts'], data['PRs'], data['Label'] = [], [], [], []
'''Iterate over each sample region in the dataset'''
for cat in tqdm(cats):
sample, label = dataset.sample_region_df(df, cat=cat)
perms = permutations(cells,n_perms)
        '''For each permutation, keep only those whose second element is not Tumor Cell (to remove tumor cell -> tumor cell interactions)'''
for perm in [x for x in perms if x[:][1][0] != 'Tumor Cell']:# and x[:][2][0] != 'Tumor Cell']:
data['Region'].append(cat)
data['Label'].append(label)
'''Load the phenotype names into the dict'''
sample_ptypes = []
for n, p in enumerate(perm):
s = p[0]
'''if s=='Tumor Cell':
s+=': '+p[1]'''
data['Phenotype'+str(n)].append(s)
sample_ptypes.append(sample[sample.Phenotype == p[0]])
#if sample.iloc[0].Pathology != 'Normal': continue
# sample_name = cat
pathology = sample.iloc[0].Pathology
# print('Sample: %s, %s Region' % (sample_name, pathology))
# print('The sample size is %i' % len(sample))
target_cells = cells[3:]
# print('Number of %s: %i' % (ptype1, len(sample_ptype1)))
# print('Number of %s: %i' % (ptype2, len(sample_ptype2)))
instances = spatial_utils.calculate_instances(sample_ptypes[0], sample_ptypes[1:], d=50)
count = len(sample_ptypes[0])
# print('Participation Ratio: %0.3f' % (instances / count))
data['Instances'].append(instances)
data['Counts'].append(count)
if data['Counts'][-1]>0:
data['PRs'].append(data['Instances'][-1]/data['Counts'][-1])
else:
data['PRs'].append(0)
'''Output result to CSV'''
    df = pd.DataFrame(data)  # api: pandas.DataFrame
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
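# Note: the `cond and 'blosc' or 'zlib'` expression above is the pre-ternary Python idiom,
# equivalent to: 'blosc' if LooseVersion(tables.__version__) >= '2.2' else 'zlib'.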
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
        self.store['b'] = tm.makeDataFrame()  # api: pandas.util.testing.makeDataFrame
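# For reference, a minimal HDFStore round-trip with the modern pandas API (a sketch, assuming
# the optional `tables` dependency is installed; it is not part of the test suite above):
import pandas as pd

with pd.HDFStore('__sketch__.h5') as store:
    store['df'] = pd.DataFrame({'a': [1, 2, 3]})
    assert list(store.keys()) == ['/df']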
# --------------
#Importing the modules
import pandas as pd
import numpy as np
from scipy.stats import mode
#Code for categorical variable
def categorical(df):
categorical_var = pd.Categorical(df)
#categorical_var = df.unique()
return categorical_var
#Code for numerical variable
def numerical(df):
numerical_var = df._get_numeric_data().columns
#numerical_var = df.nunique()
return numerical_var
#code to check distribution of variable
def clear(df,col,val):
count = df[col].value_counts()
return count
#Code to check instances based on the condition
def instances_based_condition(df,col1,val1,col2,val2):
instance = df[(df[col1]>val1) & (df[col2]==val2)]
return instance
# Code to calculate different aggregated values according to month
#pivot = pd.pivot_table(df,index="Type 1",values="Attack speed points",columns="Generation",aggfunc="mean")
#print(pivot)
def agg_values_ina_month(df,date_col,agg_col,agg):
    df[date_col] = pd.to_datetime(df[date_col])  # api: pandas.to_datetime
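# A self-contained sketch of the month-aggregation helper outlined above (an assumption about
# the intended body; the function name and grouping choice are illustrative):
import pandas as pd

def agg_values_by_month(df, date_col, agg_col, agg):
    df = df.copy()
    df[date_col] = pd.to_datetime(df[date_col])
    # group rows by the calendar month of date_col and aggregate agg_col with the given function
    return pd.pivot_table(df, index=df[date_col].dt.month, values=agg_col, aggfunc=agg)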
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
        # test ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
        if op_str in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
        elif op_str in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# ## LEVEL 1
ind = Series(df.index.get_level_values(1).values, index=index)
res1 = df.query('ilevel_1 == "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" == ilevel_1', parser=parser, engine=engine)
exp = df[ind == "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_1 != "eggs"', parser=parser, engine=engine)
res2 = df.query('"eggs" != ilevel_1', parser=parser, engine=engine)
exp = df[ind != "eggs"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_1 == ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] == ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_1 != ["eggs"]', parser=parser, engine=engine)
res2 = df.query('["eggs"] != ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["eggs"] in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" in ilevel_1', parser=parser, engine=engine)
exp = df[ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["eggs"] not in ilevel_1', parser=parser, engine=engine)
res2 = df.query('"eggs" not in ilevel_1', parser=parser, engine=engine)
exp = df[~ind.isin(["eggs"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_partially_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.arange(10)
index = MultiIndex.from_arrays([a, b])
index.names = [None, "rating"]
df = DataFrame(np.random.randn(10, 2), index=index)
res = df.query("rating == 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind == 1]
tm.assert_frame_equal(res, exp)
res = df.query("rating != 1", parser=parser, engine=engine)
ind = Series(
df.index.get_level_values("rating").values, index=index, name="rating"
)
exp = df[ind != 1]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind == "red"]
tm.assert_frame_equal(res, exp)
res = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
ind = Series(df.index.get_level_values(0).values, index=index)
exp = df[ind != "red"]
tm.assert_frame_equal(res, exp)
def test_query_multiindex_get_index_resolvers(self):
df = tm.makeCustomDataframe(
10, 3, r_idx_nlevels=2, r_idx_names=["spam", "eggs"]
)
resolvers = df._get_index_resolvers()
def to_series(mi, level):
level_values = mi.get_level_values(level)
s = level_values.to_series()
s.index = mi
return s
col_series = df.columns.to_series()
expected = {
"index": df.index,
"columns": col_series,
"spam": to_series(df.index, "spam"),
"eggs": to_series(df.index, "eggs"),
"C0": col_series,
}
for k, v in resolvers.items():
if isinstance(v, Index):
assert v.is_(expected[k])
elif isinstance(v, Series):
tm.assert_series_equal(v, expected[k])
else:
raise AssertionError("object must be a Series or Index")
@td.skip_if_no_ne
class TestDataFrameQueryNumExprPandas:
@classmethod
def setup_class(cls):
cls.engine = "numexpr"
cls.parser = "pandas"
@classmethod
def teardown_class(cls):
del cls.engine, cls.parser
def test_date_query_with_attribute_access(self):
engine, parser = self.engine, self.parser
skip_if_no_pandas_parser(parser)
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query(
"@df.dates1 < 20130101 < @df.dates3", engine=engine, parser=parser
)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_no_attribute_access(self):
engine, parser = self.engine, self.parser
df = DataFrame(np.random.randn(5, 3))
df["dates1"] = date_range("1/1/2012", periods=5)
df["dates2"] = date_range("1/1/2013", periods=5)
df["dates3"] = date_range("1/1/2014", periods=5)
res = df.query("dates1 < 20130101 < dates3", engine=engine, parser=parser)
expec = df[(df.dates1 < "20130101") & ("20130101" < df.dates3)]
tm.assert_frame_equal(res, expec)
def test_date_query_with_NaT(self):
engine, parser = self.engine, self.parser
n = 10
df = DataFrame(np.random.randn(n, 3))
df["dates1"] = date_range("1/1/2012", periods=n)
df["dates2"] = date_range("1/1/2013", periods=n)
df["dates3"] =
|
date_range("1/1/2014", periods=n)
|
pandas.date_range
|
import numpy as np
import pandas as pd
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from scipy.stats import pearsonr, zscore
from tqdm import tqdm
from joblib import Parallel, delayed
import os
# number of bootstraps
n_boot = 100
ss = StandardScaler()
kr = SVR(kernel='linear', C=10)
pipe = Pipeline([('scaler',ss), ('regressor', kr)])
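# Illustrative fit/predict call for the pipeline above (sketch only; X_train, y_train and X_test
# are placeholders, not variables defined in this script):
# pipe.fit(X_train, y_train)
# y_pred = pipe.predict(X_test)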
def main(permute=False):
os.makedirs('results/age_predictions', exist_ok=True)
### LOAD DATA ##################################################################################################
# load sample, sex, RIN corrected data for all samples up to 400 days
bulk_data = pd.read_csv('results/gene_models/W5-gene-data-corrected.csv')
significant_genes = pd.read_csv('results/gene_correlations/PCA_correlations-KendallTau-PC-significant_genes-p0.05.csv')['symbol']
# load PC data
pca_data = pd.read_csv('results/PCA/mean-regional-principal-components.csv')
pca_dict = dict(zip(pca_data['region'], pca_data['PC1']))
# pivot table
model_data = pivot_bulk_data(bulk_data, values='residuals') # sample data, all genes
model_data.insert(3, 'PC', model_data['region'].map(pca_dict))
# select significant genes
significant_model_data = pd.concat((model_data[['sample','region','age', 'PC']], model_data[significant_genes]), axis=1)
# validation data
    braincloud_data = pd.read_csv('data/validation/BrainCloud-W5-bulk-expression-data-scRNA-filtered.csv')  # api: pandas.read_csv
import json
import ijson
import base64
import cv2
import random
import requests
import matplotlib.pyplot as plt
from PIL import Image
import pandas as pd
import numpy as np
import os.path
import decimal
def create_dataframe(relative_dir ='/../raw_data/dataset_062120.json', image_size=(224,224)):
"""
convert the JSON file of my research into a dataframe with the following columns:
eyeImage: np array with shape @image_size
leftEye: np array with shape (6,2): 6 left eye landmarks positions in the form of (x,y), where -1<=x,y<=1
rightEye: np array with shape (6,2): 6 right eye landmarks positions in the form of (x,y), where -1<=x,y<=1
y: the position that the user looks at on the screen in the form of (x,y), where -1<=x,y<=1
"""
f = open(os.path.join(os.path.dirname(__file__))+relative_dir)
eyeImages,leftEyes,rightEyes,ys = [], [], [], []
# for dealing with error in json.dumps
def decimal_default(obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
raise TypeError
for item in ijson.items(f, "item"):
# convert the string into a python dict
temp = json.dumps(item, default=decimal_default)
temp = json.loads(temp)
# convert the image into the np array
eyeImage = convert_base64_to_nparray(temp["x"]["eyeImage"],image_size)
eyeImages.append(eyeImage)
leftEye = np.array(temp["x"]["eyePositions"]["leftEye"]).reshape(12)
leftEyes.append(leftEye)
rightEye = np.array(temp["x"]["eyePositions"]["rightEye"]).reshape(12)
rightEyes.append(rightEye)
y = np.array(temp["y"])
ys.append(y)
list_of_tuples = list(zip(eyeImages,leftEyes,rightEyes,ys))
df = pd.DataFrame(list_of_tuples, columns =['eyeImage','leftEye', 'rightEye', 'y'])
return df
# def create_dataframe(relative_dir ='/../raw_data/dataset_062120.json' ):
# """
# convert the JSON file of my research into a dataframe with the following columns:
# eyeImage: np array with shape (25, 50, 3)
# leftEye: np array with shape (6,2): 6 left eye landmarks positions in the form of (x,y), where -1<=x,y<=1
# rightEye: np array with shape (6,2): 6 right eye landmarks positions in the form of (x,y), where -1<=x,y<=1
# y: the position that the user looks at on the screen in the form of (x,y), where -1<=x,y<=1
# """
# f = open(os.path.join(os.path.dirname(__file__))+relative_dir)
# eyeImages,leftEyes,rightEyes,ys = [], [], [], []
# for item in ijson.items(f, "item"):
# # convert the string into a python dict
# temp = json.loads(item)
# # convert the image into the np array
# eyeImage = convert_base64_to_nparray(temp["x"]["eyeImage"])
# eyeImages.append(eyeImage)
# leftEye = np.array(temp["x"]["eyePositions"]["leftEye"]).reshape(12)
# leftEyes.append(leftEye)
# rightEye = np.array(temp["x"]["eyePositions"]["rightEye"]).reshape(12)
# rightEyes.append(rightEye)
# y = np.array(temp["y"])
# ys.append(y)
# list_of_tuples = list(zip(eyeImages,leftEyes,rightEyes,ys))
# df = pd.DataFrame(list_of_tuples, columns =['eyeImage','leftEye', 'rightEye', 'y'])
# return df
def create_train_validation(df, train_percentage=0.8):
"""
    Take in the original dataframe and split it into train & validation sets
    according to train_percentage (default 0.8, i.e. an 80/20 split)
input:
df
output:
X_train
X_validation
Y_train,
Y_validation
"""
train = df.sample(frac=train_percentage).sort_index()
validation = df.drop(train.index).sort_index()
return train.iloc[:,0:3], validation.iloc[:,0:3], pd.DataFrame(train.iloc[:,-1]), pd.DataFrame(validation.iloc[:,-1])
def convert_base64_to_nparray(image_data, image_size):
"""
    takes in a base64-encoded image and converts it into a numpy array of shape (*image_size, 3)
every element in the array ranges from 0 to 1
"""
image = image_data.split(",")[1]
image = np.asarray(bytearray(base64.b64decode(image)), dtype="uint8")
    image = cv2.imdecode(image, cv2.IMREAD_COLOR)  # decode as a 3-channel BGR image (cv2.COLOR_BGR2RGB is a cvtColor code, not an imread flag)
res = cv2.resize(image, dsize=image_size, interpolation=cv2.INTER_CUBIC)
# get a normal color as the openCV use BGR in default
RGB_img = cv2.cvtColor(res, cv2.COLOR_BGR2RGB)
RGB_img = RGB_img/255.
return RGB_img
def save_jpgs(relative_dir ='/../raw_data/dataset_062120.json' ):
"""
convert the JSON file of my research into a dataframe with the following columns:
eyeImage: np array with shape (25, 50, 3)
leftEye: np array with shape (6,2): 6 left eye landmarks positions in the form of (x,y), where -1<=x,y<=1
rightEye: np array with shape (6,2): 6 right eye landmarks positions in the form of (x,y), where -1<=x,y<=1
y: the position that the user looks at on the screen in the form of (x,y), where -1<=x,y<=1
"""
f = open(os.path.join(os.path.dirname(__file__))+relative_dir)
i = 0
for item in ijson.items(f, "item"):
# convert the string into a python dict
temp = json.loads(item)
# convert the image into the np array
eyeImage = save_nparray_to_jpg(temp["x"]["eyeImage"],i)
i += 1
def save_nparray_to_jpg(image_data,idx):
image = image_data.split(",")[1]
image = np.asarray(bytearray(base64.b64decode(image)), dtype="uint8")
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
im = Image.fromarray(image)
im.save("image_data/"+str(idx)+".jpeg")
def create_binary_labels(y_labels):
"""
takes in our original y labels, each label is in the form of [x,y]
    converts the original labels into new y labels that are "R" when x>0 and "L" otherwise
    input:
        y_labels: an n x 1 pandas df with each element in the form of [x,y]
    output:
        a converted binary label with each element == "R" if x>0, else "L"
"""
    binary_labels = pd.DataFrame()  # api: pandas.DataFrame
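# A self-contained sketch of the binary-label conversion described in the docstring above
# (an assumption about the intended body; the helper name is illustrative):
import pandas as pd

def to_binary_labels(y_labels):
    # each element of the single column is an [x, y] pair; label 'R' when x > 0, else 'L'
    return pd.DataFrame(y_labels.iloc[:, 0].apply(lambda p: 'R' if p[0] > 0 else 'L'))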
"""
Module specifically for the analysis of utility
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
from scipy import stats
plt.style.use('seaborn-paper')
plt.rcParams['svg.fonttype'] = 'none'
SMALL_SIZE = 10
MEDIUM_SIZE = 12
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('errorbar', capsize = 3)
#plt.rcParams['font.family'] = 'sans-serif'
from macaque.f_toolbox import *
tqdm = ipynb_tqdm()
#%%
def compare_rangeAndUtility(Trials, fractile, fractile_MLE, binary = None, binary_MLE = None):
'''
'''
# sb.set_context("paper")
results = [[],[],[]]
cum_mags = []
norm = lambda x: (x - min(x)) / (max(x) - min(x))
if np.size(binary) > 1:
dating = np.unique(np.hstack((fractile_MLE.date.unique(), binary_MLE.date.unique())))
else:
dating = fractile_MLE.date.unique()
fractile = fractile.loc[np.isin(fractile.sessionDate.values, dating)].copy()
if np.size(binary) > 1:
binary = binary.loc[np.isin(binary.sessionDate.values, dating)].copy()
fig, ax = plt.subplots( 1, 1, squeeze = False, figsize=(6,24))
tt_fractile = fractile.getTrials(Trials)
if np.size(binary) > 1:
tt_binary = binary.getTrials(Trials)
if np.size(binary) > 1:
allTrials = pd.concat((tt_fractile, tt_binary))
else:
allTrials = tt_fractile
allTrials = allTrials.drop_duplicates(['sessionDate','time'])
for i, date in enumerate(np.sort(allTrials.sessionDate.unique())):
rr_1 = fractile_MLE.loc[fractile_MLE.date == date].mag_range.values
if np.size(binary) > 1:
rr_2 = binary_MLE.loc[binary_MLE.date == date].mag_range.values
else:
rr_2 = []
rr = unique_listOfLists(np.hstack((rr_1, rr_2)))[0]
ff = fractile_MLE.loc[fractile_MLE.date == date]
util = ff.iloc[-1].full_model.model_parts['utility'](np.linspace(0,1))
# ranging = ff.mag_range.values
df = allTrials.loc[allTrials.sessionDate == date]
mA = flatten([np.array(options)[0::2] for options in df.gambleA.values])
mB = flatten([np.array(options)[0::2] for options in df.gambleB.values])
allMagnitudes = np.hstack((mA,mB))
mean = np.mean(allMagnitudes, 0)
std = np.std(allMagnitudes, 0)
min_m = min(allMagnitudes)
max_m = max(allMagnitudes)
cum_mags.extend(allMagnitudes)
inflection = np.linspace(rr[0], rr[1])[np.gradient((util)) == max(np.gradient((util)))]
cum_mean = np.mean(cum_mags, 0)
ax[0,0].plot(np.linspace(rr[0], rr[1]), (1-util)+i-0.5, color = 'black')
ax[0,0].plot(np.linspace(rr[0], rr[1]), norm(np.gradient((1-util)))+i-0.5,
color = 'darkred', alpha = 0.4)
ax[0,0].plot([inflection,inflection], [i-.5, i+.5], '--',color = 'darkred')
# ax[0,0].plot([cum_mean,cum_mean], [i, i+1], '--',color = 'blue')
ax[0,0].plot(mean, i, marker = 'o', color = 'black')
ax[0,0].plot(rr, [i,i], '--', color = 'grey')
ax[0,0].scatter(mean-std, i, marker = "|", color = 'black')
ax[0,0].scatter(mean+std, i, marker = "|", color = 'black')
# print(np.linspace(rr[0], rr[1])[np.gradient((util)) == max(np.gradient((util)))] )
if sum(rr) == 0.5:
results[0].extend([inflection - mean])
if sum(rr) == 1.0 or sum(rr) == 1.4000000000000001:
results[1].extend([inflection - mean])
if sum(rr) == 1.5:
results[2].extend([inflection - mean])
ax[0,0].set_ylabel('reward magnitude')
ax[0,0].set_xlabel('testing session')
plt.gca().invert_yaxis()
# if np.size(binary) > 1:
# ax[0,0].set_ylim([-0.05, 1.05])
results = [np.array(flatten(rr)) for rr in results]
index = flatten([[i]*len(rr) for i, rr in enumerate(results)])
results = flatten(results)
df = pd.DataFrame(np.vstack((results, index)).T, columns = ['diff','range'])
data = []
for ff in np.unique(df.range.values):
where = np.array(df.range.values) == ff
data.append(df['diff'].values[where])
data = np.array(data)
from scipy import stats
import statsmodels.api as sm
from statsmodels.formula.api import glm
from statsmodels.stats.anova import anova_lm
print('\n=============================')
print(stats.mstats.kruskalwallis(*[data[x] for x in np.arange(data.shape[0])]))
print('\n')
post = sm.stats.multicomp.MultiComparison(df['diff'], df.range)
print(post.allpairtest(stats.ranksums, method = 'holm')[0])
[print(stats.ttest_1samp(data[x], 0)) for x in np.arange(data.shape[0])]
return
#%%
def LR_lossMetric(fractile_MLE, fractile, Trials):
'''
'''
from macaque.f_models import define_model
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import itertools
from macaque.f_models import trials_2fittable
from scipy.stats import sem
import statsmodels.api as sm
tt = fractile.getTrials(Trials)
fig, ax = plt.subplots( 1, 3, squeeze = False, figsize=(10,6))
palette = itertools.cycle(sb.color_palette('colorblind'))
for i, rr in enumerate(unique_listOfLists(fractile_MLE.mag_range.values)):
cc = next(palette)
df = fractile_MLE.loc[fractile_MLE.mag_range.apply(lambda x: x == rr)]
allData= []
for date in tqdm(df.date.values , desc='gathering daily trial data'):
X, Y = trials_2fittable(tt.loc[tt['sessionDate'] == date], use_Outcomes=True)
outcomes = X.outcomes.values[:-1]
switch = np.abs(np.diff(Y))
dataset = []
for reward in np.unique(outcomes):
index = (outcomes == reward)
if sum(index) > 10:
dataset.append([reward, np.mean(switch[index])])
dataset = np.vstack(dataset)
ax[0,i].scatter(dataset[:,0], dataset[:,-1], alpha = 0.2, color = cc)
allData.append(dataset)
ax[0,i].axhline(0.5, color = 'k')
allData = np.vstack(allData); datapoints = []
for reward in np.unique(allData[:,0]):
index = (allData[:,0] == reward)
datapoints.append([reward, np.mean(allData[:,1][index]), sem(allData[:,1][index])])
datapoints = np.vstack(datapoints)
# ax[0,i].plot(datapoints[:,0], datapoints[:,1], color = cc)
ax[0,i].set_ylabel('proportion of side switches (0 no switch)')
ax[0,i].set_xlabel('past outcome EV')
ax[0,i].set_ylim([0,1])
squarePlot(ax[0,i])
mod = sm.OLS(allData[:,1], sm.add_constant(allData[:,0])).fit()
print('Range: ', rr, '===================================')
print(mod.summary())
ax[0,i].plot(np.linspace(min(allData[:,0]), max(allData[:,0])),
(np.linspace(min(allData[:,0]), max(allData[:,0])) * mod.params[-1]) + mod.params[0] ,'--', color = cc )
plt.tight_layout()
#%%
fig, ax = plt.subplots( 1, 3, squeeze = False, figsize=(10,6))
palette = itertools.cycle(sb.color_palette('colorblind'))
for i, rr in enumerate(unique_listOfLists(fractile_MLE.mag_range.values)):
cc = next(palette)
df = fractile_MLE.loc[fractile_MLE.mag_range.apply(lambda x: x == rr)]
allData= []
for date in tqdm(df.date.values , desc='gathering daily trial data'):
X, Y = trials_2fittable(tt.loc[tt['sessionDate'] == date], use_Outcomes=True)
gA = np.vstack(X[[ 'A_m1', 'A_p1', 'A_m2', 'A_p2' ]].values)
gB = np.vstack(X[[ 'B_m1', 'B_p1', 'B_m2', 'B_p2' ]].values)
chA = Y
outcomes = X.outcomes.values
ggA = np.array([not all(left[-2:]) == 0 for left in gA])
ggB = np.array([not all(right[-2:]) == 0 for right in gB])
a_is_gamble_is_chosen = (ggA == True) & (chA == 1)
b_is_gamble_is_chosen = (ggB == True) & (chA == 0)
where = (a_is_gamble_is_chosen == True) | (b_is_gamble_is_chosen == True)
switch = np.abs(np.diff(where))
where = where[:-1]
outcomes = outcomes[:-1][where]
switch = switch[where]
dataset = []
for reward in np.unique(outcomes):
index = (outcomes == reward)
if sum(index) > 5:
dataset.append([reward, np.mean(switch[index])])
dataset = np.vstack(dataset)
ax[0,i].scatter(dataset[:,0], dataset[:,-1], alpha = 0.2, color = cc)
allData.append(dataset)
ax[0,i].axhline(0.5, color = 'k')
allData = np.vstack(allData); datapoints = []
for reward in np.unique(allData[:,0]):
index = (allData[:,0] == reward)
datapoints.append([reward, np.mean(allData[:,1][index]), sem(allData[:,1][index])])
datapoints = np.vstack(datapoints)
ax[0,i].set_ylabel('proportion of gamble/safe switches (0 no switch)')
ax[0,i].set_xlabel('past outcome EV')
ax[0,i].set_ylim([0,1])
squarePlot(ax[0,i])
mod = sm.OLS(allData[:,1], sm.add_constant(allData[:,0])).fit()
print('Range: ', rr, '===================================')
print(mod.summary())
ax[0,i].plot(np.linspace(min(allData[:,0]), max(allData[:,0])),
(np.linspace(min(allData[:,0]), max(allData[:,0])) * mod.params[-1]) + mod.params[0] ,'--', color = cc )
plt.tight_layout()
#%%
def correlate_dailyParameters(fractile_MLE, revertLast = False):
'''
'''
from macaque.f_models import define_model
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import itertools
import statsmodels.api as sm
# fractile_MLE
mle, legend, ranges, c_specific = extract_parameters(fractile_MLE, dataType = 'mle', minTrials = 40, revertLast = revertLast)
behaviour, legend, ranges, c_specific = extract_parameters(fractile_MLE, dataType = 'behaviour', minTrials = 40, revertLast = revertLast)
mle = [parameters[:,2:] for parameters in mle]
palette = itertools.cycle(sb.color_palette('colorblind'))
fig, ax = plt.subplots( 1, 2, squeeze = False, figsize=(10,6))
for mm, bb, rr, color in zip(mle, behaviour, ranges, c_specific):
mm = np.log(mm); bb = np.log(bb)
print(' REGRESSION Range: ', rr, ' ============================================= ' )
ax[0,0].scatter(bb[:,0], mm[:,0], color = color)
x = bb[:,0]; y = mm[:,0]
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print(' parameter temperature: --------------- ' )
print('slope: ', mod.params[-1], '; p-val: ', mod.pvalues[-1], '; R^2: ' ,mod.rsquared)
# -------------------------------------------------------------------------------------
ax[0,1].scatter(bb[:,-1], mm[:,-1], color = color)
x = bb[:,-1]; y = mm[:,-1]
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print(' parameter height: --------------- ' )
print('slope: ', mod.params[-1], '; p-val: ', mod.pvalues[-1], '; R^2: ' ,mod.rsquared)
# -------------------------------------------------------------------------------------
bb_mags = np.log(np.vstack(behaviour))
mm_mags = np.log(np.vstack(mle))
x = bb_mags[:,0]; y = mm_mags[:,0]
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print('General correlation of temperature: ======================================================')
print(mod.summary())
x = bb_mags[:,1]; y = mm_mags[:,1]
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print('General correlation of height: ======================================================')
print(mod.summary())
ax[0,0].plot(np.linspace(min(bb_mags[:,0]), max(bb_mags[:,0])), np.linspace(min(bb_mags[:,0]),max(bb_mags[:,0])), '--', color = 'k')
ax[0,1].plot(np.linspace(min(bb_mags[:,-1]), max(bb_mags[:,-1])), np.linspace(min(bb_mags[:,-1]),max(bb_mags[:,-1])), '--', color = 'k')
ax[0,0].grid(); ax[0,1].grid()
ax[0,0].set_ylabel('temperature MLE'); ax[0,0].set_xlabel('temperature Fractile')
ax[0,1].set_ylabel('height MLE'); ax[0,1].set_xlabel('height Fractile')
squarePlot(ax[0,0]); squarePlot(ax[0,1])
#%%
ff = define_model(fractile_MLE.model_used.iloc[-1])
p0 = ff[1]
utility = lambda pp: ff[-1](p0)['empty functions']['utility'](np.linspace(0,1,100), pp)
all_bbs = []; all_mms = []
palette = itertools.cycle(sb.color_palette('colorblind'))
fig, ax = plt.subplots( 1, 1, squeeze = False, figsize=(6,6))
for mm, bb, rr, color in zip(mle, behaviour, ranges, c_specific):
print('Range: ', rr, ' ============================================= ' )
bb_area = [np.sum(utility(pp))/100 for pp in bb]
mm_area = [np.sum(utility(pp))/100 for pp in mm]
ax[0,0].scatter(bb_area,mm_area, color = color)
x = bb_area; y = mm_area
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print(' parameter temperature: --------------- ' )
print('slope: ', mod.params[-1], '; p-val: ', mod.pvalues[-1], '; R^2: ' ,mod.rsquared)
# -------------------------------------------------------------------------------------
all_bbs.extend(bb_area)
all_mms.extend(mm_area)
ax[0,0].legend(legend); ax[0,0].grid()
ax[0,0].plot(np.linspace(0,1), np.linspace(0,1), color = 'k')
squarePlot(ax[0,0])
x = all_bbs; y = all_mms
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(y, x).fit()
print('General correlation area under the curve: ======================================================')
print(mod.summary())
ax[0,0].plot(np.linspace(0, 1),
(np.linspace(0,1) * mod.params[-1]) + mod.params[0] , '--', color = 'k' )
ax[0,0].set_ylabel('Area under curve MLE'); ax[0,0].set_xlabel('Area under curve Fractile')
print(' =========================================================================')
print(' =========================================================================')
#%%
def quantifyAdaptation(MLE_df, dataType='behaviour', revertLast = False):
'''
'''
from scipy import stats
from macaque.f_models import define_model
dataList, legend, ranges, c_specific = extract_parameters(MLE_df, dataType = dataType, minTrials = 40, revertLast = revertLast)
if dataType == 'mle':
dataList = [ff[:,2:4] for ff in dataList]
#%%
from macaque.f_uncertainty import bootstrap_sample, bootstrap_function
import statsmodels.api as sm
fig, ax = plt.subplots( 1, 2, squeeze = False, figsize=(10,6))
ff = define_model(MLE_df.model_used.iloc[-1])
p0 = ff[1]
utility = ff[-1](p0)['empty functions']['utility']
i = 0; past_integral= 0
print('-------------------------------------------------')
for params, rr, color in zip(dataList, ranges, c_specific):
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean, lower, upper = bootstrap_function(uu, params, 'median')
print(rr, '/ median = ', np.median(params, 0), ' / CI = ', bootstrap_sample(params, 'median'))
ax[0,0].plot(np.linspace(0,1,100), mean, color = color)
ax[0,0].fill_between(np.linspace(0,1,100), y1=lower, y2=upper, alpha=0.25, color = color)
integral = np.sum(mean)
        if past_integral != 0:
            quantification = (integral - past_integral) / past_integral
            print('relative percent change: ', quantification, 'for range ', rr)
# dailyDifferences = [np.sum(uu(pp)) - past_integral for pp in params]
# ax2[0,i].plot(dailyDifferences)
# dailyDifferences = sm.add_constant(dailyDifferences)
# rlm_model = sm.RLM(np.arange(0, len(dailyDifferences)), dailyDifferences).fit()
# print(rlm_model.params, rlm_model.pvalues)
#
i +=1
past_integral = integral
ax[0,0].set_xlabel('reward magnitude')
ax[0,0].set_ylabel('median utility')
ax[0,0].legend(legend)
ax[0,0].plot(np.linspace(0,1,100), np.linspace(0,1,100), '--', color='k')
squarePlot(ax[0,1])
plt.suptitle('side-by-side utilities')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#%%
fig2, ax2 = plt.subplots( 1, 3, squeeze = False, figsize=(10,6))
n=0; print('\n')
for params, rr in zip(dataList, ranges):
uu = lambda pp: utility(np.linspace(0,1,100), pp)
past = 0; y = []
for i,pp in enumerate(params):
if past != 0 :
y.extend([(np.sum(uu(pp)) - past) / past])
past = np.sum(uu(pp))
# x = np.arange(0, len(y))
# x = sm.add_constant(x)
# rlm_model = sm.OLS(y, x).fit()
t,p = stats.ttest_1samp(y, 0)
print('adaptation within: ', rr, ' t: ', t, ' p: ', p, ' mean: ', np.mean(y))
ax2[0,n].plot(y)
ax2[0,n].axhline(0, color = 'k')
squarePlot(ax2[0,n])
n += 1
#%%
print('\n')
reference= ranges
index = np.arange(0, len(ranges))[flatten(np.diff(ranges) == 1)]
if np.size(index) == 0:
index = [0]
uu = lambda pp: utility(np.linspace(0,1,200), pp)
mean_large_200, _, _ = bootstrap_function(uu, dataList[index[0]], 'median')
# mean_large_200 = (mean_large_200 * (np.max(ranges[index]) - np.min(ranges[index]))) + np.min(ranges[index])
# mean = mean/1.3
top2 = mean_large_200[-1 + int(((min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:])) / (max(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:]))*200))]
bottom2 = mean_large_200[0]
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean_large_100, _, _ = bootstrap_function(uu, dataList[index[0]], 'median')
# mean_large_100 = (mean_large_100 * (np.max(ranges[index]) - np.min(ranges[index]))) + np.min(ranges[index])
spreads = np.diff(np.sort(unique_listOfLists(ranges), 0))
means = np.mean(np.sort(unique_listOfLists(ranges), 0), 1)
for params, rr, color in zip(dataList, ranges, c_specific):
if np.diff(rr)==max(spreads): #if the parameters are from the full range
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean, lower, upper = bootstrap_function(uu, params, 'median')
ax[0,1].plot(np.linspace(rr[0],rr[1],100), mean, color=color)
ax[0,1].fill_between(np.linspace(rr[0],rr[1],100), y1=lower, y2=upper, alpha=0.25, color=color)
elif np.diff(rr)<max(spreads) and np.mean(rr) == min(means):
# if the parameters are from the low range
if rr[0] != reference[index].min():
# for ugo -> if the 0 of full rnage doesnt match low range
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean, lower, upper = bootstrap_function(uu, params, 'median')
bottom2 = -mean[int(((min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:]) - min(rr)) / (max(rr) - min(rr))*100))]
# bottom2 = bottom2/1.3
bottom2 = (bottom2*(top2-0))+0
ax[0,1].plot(np.linspace(rr[0],rr[1],100), (mean*(top2-bottom2))+bottom2, color=color)
ax[0,1].fill_between(np.linspace(rr[0],rr[1],100), y1=(lower*(top2-bottom2))+bottom2, y2=(upper*(top2-bottom2))+bottom2, alpha=0.25, color=color)
integral = sum((mean*(top2-bottom2))+bottom2)
no_adaptation = sum(mean_large_200[:int(((min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:])) / (max(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:]))*200))])
full_adaptation = sum((mean_large_100*(top2-bottom2))+bottom2)
max_area = full_adaptation - no_adaptation
adaptation_percentage = (integral - no_adaptation) / max_area
ax[0,1].plot(np.linspace(rr[0],rr[1],100), (mean_large_100*(top2-bottom2))+bottom2, '--', color=color)
else:
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean, lower, upper = bootstrap_function(uu, params, 'median')
integral = sum((mean*(top2-bottom2))+bottom2)
no_adaptation = sum(mean_large_200[:int(((min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:])) / (max(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:]))*200))])
full_adaptation = sum((mean_large_100*(top2-bottom2))+bottom2)
max_area = full_adaptation - no_adaptation
adaptation_percentage = (integral - no_adaptation) / max_area
print('lower range adaptation percentage: ', adaptation_percentage)
# print('lower GAC bound: ', adaptation_percentage)
# print('upper GAC bound: ', adaptation_percentage)
ax[0,1].plot(np.linspace(rr[0],rr[1],100), (mean*(top2-bottom2))+bottom2, color=color)
ax[0,1].fill_between(np.linspace(rr[0],rr[1],100), y1=(lower*(top2-bottom2))+bottom2, y2=(upper*(top2-bottom2))+bottom2, alpha=0.25, color=color)
ax[0,1].plot(np.linspace(rr[0],rr[1],100), (mean_large_100*(top2-bottom2))+bottom2, '--', color=color)
elif np.diff(rr)<max(spreads) and np.mean(rr) == max(means):
# if the parameters are from the high range
uu = lambda pp: utility(np.linspace(0,1,100), pp)
mean, lower, upper = bootstrap_function(uu, params, 'median')
ax[0,1].plot(np.linspace(rr[0],rr[1],100), (mean*(1.0-top2))+top2, color=color)
ax[0,1].fill_between(np.linspace(rr[0],rr[1],100), y1=(lower*(1.0-top2))+top2, y2=(upper*(1.0-top2))+top2, alpha=0.25, color=color)
ax[0,1].plot(np.linspace(rr[0],rr[1],100),(mean_large_100*(1.0-top2))+top2, '--', color=color)
integral = sum((mean*(1.0-top2))+top2)
no_adaptation = sum(mean_large_200[int(((min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:])) / (max(np.vstack(np.sort(unique_listOfLists(ranges), 0))[:,1]) - min(np.vstack(np.sort(unique_listOfLists(ranges), 0))[1,:]))*200)):])
full_adaptation = sum((mean_large_100*(1.0-top2))+top2)
max_area = full_adaptation - no_adaptation
adaptation_percentage = (integral - no_adaptation) / max_area
print('higher range adaptation percentage: ', adaptation_percentage)
ax[0,1].set_xlabel('reward magnitude')
ax[0,1].set_ylabel('median utility')
ax[0,1].legend(legend)
ax[0,1].plot(np.linspace(0,reference[index].max(),100), np.linspace(0,1,100), '--', color='k')
squarePlot(ax[0,1])
plt.suptitle('overlapping utilities')
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#%%
def compare_reactionTimes(fractile, fractile_MLE, Trials, revertLast = False):
'''
'''
import seaborn as sns
import scipy.optimize as opt
import matplotlib.colors as mcolors
import matplotlib.cm as cm
fig, ax = plt.subplots( 1, 3, squeeze = False, figsize=( 6 ,3 ))
normalize = mcolors.Normalize(vmin=-.8, vmax=0.8)
colormap = cm.winter
scalarmappaple = cm.ScalarMappable(norm=normalize, cmap=colormap)
f_data, legend, ranking1, c_specific = extract_parameters(fractile_MLE, dataType = 'behaviour', minTrials = 40, revertLast = revertLast)
Xs, Ys, Zs, ranges = [[],[],[],[]]
for i, rr in enumerate(ranking1):
df = fractile.loc[fractile.reward_range.apply(lambda x: all(x==rr))]
temp = df.choiceTimes.values
utilities = df.utility.values
EVs = df.primaryEV.values
secondaries = np.array([list(cc.keys()) for cc in temp])
times = np.array([list(cc.values()) for cc in temp])
for uu in np.unique(utilities):
where = (utilities == uu)
ev = EVs[where]
ss = secondaries[where]
tt = flatten([[np.mean(pp) for pp in points] for points in times[where]])
z = flatten([m-np.array(n) for m,n in zip(ev, ss)])
y = tt
x = [uu] * len(tt)
if uu == 0.5:
scalarmappaple.set_array(z)
plt.colorbar(scalarmappaple, ax=ax[0,i], fraction=0.046, pad=0.04, label='gEV - sEV')
ax[0,i].scatter(x-(np.array(z)/7), y, c=z, cmap='winter', alpha=0.2)
ax[0,i].grid()
# ax[0,1].axhline(min(flatten(times)), color='k', linestyle = '--')
ax[0,i].set_xlabel('reward utility')
ax[0,i].set_ylabel('response times')
Xs.extend(x); Ys.extend(y); Zs.extend(z); ranges.extend([i]*len(x))
ax[0,i].set_title(rr)
squarePlot(ax[0,i])
plt.tight_layout()
variables = np.vstack((Ys, Xs, np.abs(Zs), ranges, np.ones(len(Ys)))).T
df = pd.DataFrame(variables, columns = ['RTs', 'utility_level', 'delta_EV', 'range', 'constant'])
plt.figure()
sb.boxplot(x='range', y='RTs', hue='utility_level', data=df, palette='winter', showfliers=False).set( xticklabels=ranking1)
plt.grid()
plt.figure()
g = sb.lmplot(x='delta_EV', y='RTs', hue='range', data=df, scatter_kws={'alpha':0.2})
#%%
import statsmodels.api as sm
from statsmodels.formula.api import glm
from statsmodels.stats.anova import anova_lm
from statsmodels import graphics
g = sb.pairplot(df, hue='range', size=2.5, plot_kws=dict(s=80, edgecolor="white", linewidth=2.5, alpha=0.3))
for t, l in zip(g._legend.texts, legend): t.set_text(l)
# plt.legend(legend)
fig = plt.gcf()
fig.tight_layout()
formula = 'RTs ~ delta_EV + utility_level + C(range) + delta_EV:utility_level + C(range):utility_level'
glm_model = glm(formula=formula, data=df, family=sm.families.Gamma(link = sm.genmod.families.links.identity)).fit()
# glm_model = ols(formula=formula, data=df, family='Gamma').fit()
# glm_model = sm.GLM(endog=df.RTs, exog=df[['constant','utility_level','delta_EV','range']], family=sm.families.Poisson()).fit()
print(glm_model.summary2())
graphics.gofplots.qqplot(glm_model.resid_response, line='r')
print('\n ----------------------------------------------',
'\n post-hoc (bonferroni) on range term:')
print(glm_model.t_test_pairwise('C(range)', method='bonferroni').result_frame[['coef','pvalue-bonferroni','reject-bonferroni']])
# sm.graphics.plot_ccpr_grid(glm_model)
# ff = plt.gcf()
# ff.tight_layout()
# sm.graphics.plot_partregress_grid(glm_model)
# sns.pairplot(df, x_vars=['utility_level', 'delta_EV', 'range'], y_vars='RTs', size=7, aspect=0.7, kind='reg')
#%%
def compare_inflections(fractile_MLE, revertLast = False, dataType = 'behaviour'):
'''
'''
from scipy import stats
import scipy.optimize as opt
import itertools
import statsmodels.api as sm
from macaque.f_Rfunctions import dv2_manova
def plotting_iqr(ff):
med = np.median(ff, 0)
err = med - np.percentile(ff, [25, 75], 0)
# upper_err = np.percentile(ff, 75, 0) - med
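        # err stacks (median - 25th pct, median - 75th pct); np.abs turns it into a (2, n_params)
        # array of asymmetric error-bar lengths suitable for bar(..., yerr=...)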
return np.abs(err)
f_data, legend, ranking1, c_specific = extract_parameters(fractile_MLE, dataType = dataType, minTrials = 40, revertLast = revertLast)
palette = itertools.cycle(sb.color_palette('colorblind'))
if dataType == 'mle':
f_data = [ff[:,2:4] for ff in f_data]
fig, ax = plt.subplots( 1, 3, squeeze = False, figsize=(12,6))
gap = 0
model_parts = fractile_MLE.full_model.iloc[-1]
model = model_parts.model_parts['empty functions']['utility']
for ff,rr, color in zip(f_data, ranking1, c_specific):
# color = next(palette)
ax[0,0].bar(np.array([0,1])+gap, np.mean(ff, 0), 0.2, yerr = plotting_iqr(ff), color = color)
print('median parameters: ', np.median(ff), ' ; for range ', rr)
        print('interquartile range: ', stats.iqr(ff, 0))
gap += 0.25
ax[0,0].legend(legend)
ax[0,0].set_xticks([0.25, 1.25])
ax[0,0].set_xticklabels(['p1','p2'])
#%%
f_data = np.vstack((np.vstack(f_data).T, np.hstack([len(ff) * [i] for i, ff in enumerate(f_data)]))).T
manovaData = np.vstack((f_data))
print('\n ==============================================')
print('2-WAY mANOVA ON UTILITY PARAMETERS (METHODS AND RANGE)')
print('-------------------------------------------- \n')
dv2_manova(DV1 = np.log(manovaData[:,0]), DV2= np.log(manovaData[:,1]), IV = manovaData[:,-1])
#%%
cc_ratio = []; inflection = []
ranking = ranking1
for dataset in tqdm(manovaData, desc='Gathering Inflections'):
utility = model(np.linspace(0,1,10000), dataset[:2])
where = np.argmax(np.gradient(utility))
shape = np.gradient(np.gradient(utility))
shape[shape<=0] = 0
shape[shape>0] = 1
integral = np.sum(utility) / len(utility)
cc_ratio.extend([integral])
# quantification = (integral - past_integral) / past_integral
inflection.extend([np.linspace(ranking[int(dataset[2])][0],ranking[int(dataset[2])][1],10000)[where]])
inflection = np.array(inflection)
cc_ratio = np.array(cc_ratio)
mData = [inflection[manovaData[:,-1] == i] for i in np.unique(manovaData[:,-1])]
index = [manovaData[manovaData[:,-1] == i, -1] for i in np.unique(manovaData[:,-1])]
correlations=[]
gap = 0; coloration = 0
use_range = ranking
palette = itertools.cycle(sb.color_palette('colorblind'))
past = []
for mm, ii, rr, color in zip(mData, index, use_range, c_specific):
# color = next(palette)
dataset = mm
if np.size(past) != 0 :
past = (past * (rr[1] - rr[0])) + rr[0]
ax[0,1].plot( [(ii*0.2) - 0.1, (ii*0.2) + 0.1],
[np.median(past), np.median(past)], '--', color='k')
past = np.array([0 if pp < 0 else pp for pp in past])
past = np.array([1 if pp > 1 else pp for pp in past])
t,p = stats.ranksums(dataset, past)
# t,p = stats.ttest_ind(dataset,past )
else:
t=np.nan; p=np.nan
x = np.arange(len(dataset))
x = sm.add_constant(x, prepend=True)
mod = sm.OLS(dataset, x).fit()
correlations.append([mod.params[-1], mod.pvalues[-1], t, p, len(dataset)])
# ax[0,1].arrow( x = (ii*0.5) + gap, y = np.mean(dataset) / 2,
# dx = 0, dy = np.sign(mod.params[-1]) * np.mean(dataset) / 4,
# fc="k", ec="k", head_width=0.05, head_length=0.05 )
ax[0,1].bar( (np.unique(ii)*0.2) , np.median(dataset), 0.2, color=color, alpha = 0.2)
ax[0,1].scatter( jitter(ii*0.2, 0.02) , dataset, color=color, alpha = 0.75)
past = (dataset - rr[0]) / (rr[1] - rr[0])
correlations = np.vstack((np.array(correlations).T, legend)).T
# correlations = np.vstack((np.array(correlations).T, flatten([[ll] * len(legend) for ll in ['fractile','binary']]))).T
print('\n =================================================')
print('inflection slopes:')
    df = pd.DataFrame(correlations, columns = [['slope','pval', 'past_t', 'past_p', 'N', 'range']])  # api: pandas.DataFrame
import pandas as pd
from bs4 import BeautifulSoup
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class InshortsNews:
def __init__(self, category="national"):
self.category = category
self.url = "https://www.inshorts.com/en/read/" + str(self.category)
self.request_timeout = 120
self.headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"
}
self.session = requests.Session()
retries = Retry(
total=5, backoff_factor=0.5, status_forcelist=[502, 503, 504]
)
self.session.mount("http://", HTTPAdapter(max_retries=retries))
def __request(self):
try:
response = self.session.get(
self.url, timeout=self.request_timeout, headers=self.headers
)
response.raise_for_status()
content = response.content.decode("utf-8")
return content
except Exception as e:
raise
def get_news(self):
content = self.__request()
soup = BeautifulSoup(content, "html.parser")
headings = [
text.span.text
for text in soup.find_all("div", attrs={"class": "news-card-title"})
]
newss = [
text.text.strip().split("\n\n")[0]
for text in soup.find_all(
"div", attrs={"class": "news-card-content"}
)
]
short_by = [
" ".join(text.div.text.strip().split("/ \n ")[0].split()[2:])
for text in soup.find_all("div", attrs={"class": "news-card-title"})
]
times = [
text.text.strip()
.split("\n\n")[1]
.split("/ \n ")[1]
.split(" ")[0:]
for text in soup.find_all("div", attrs={"class": "news-card-title"})
]
times = pd.to_datetime(
[" ".join(text[3:6] + text[:2]) for text in times]
)
data = pd.DataFrame(data=[headings, newss, short_by, times]).T
data.columns = ["headings", "news", "short_by", "time"]
data.sort_values("time", inplace=True, ascending=False)
data.reset_index(drop=True, inplace=True)
data["category"] = self.category
return data
def get_all_news(self):
categories = [
"national",
"business",
"sports",
"world",
"politics",
"technology",
"startup",
"entertainment",
"miscellaneous",
"hatke",
"science",
"automobile",
]
data = pd.DataFrame(
columns=["headings", "news", "short_by", "time", "category"]
)
for category in categories:
data1 = InshortsNews(category).get_news()
            data = pd.concat([data, data1], axis=0)  # api: pandas.concat
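# Hedged usage sketch for the scraper above (network access and the site's current markup are
# assumptions; columns follow get_news()):
# news_df = InshortsNews('technology').get_news()
# print(news_df[['headings', 'time']].head())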
import pandas as pd
import numpy as np
import sys
# outlier detection
from statsmodels.stats.stattools import medcouple
from sklearn.neighbors import LocalOutlierFactor
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import IsolationForest
sys.path.append('../src/')
from utils import log
from utils import cast
class RemoveFeatures:
"""
    A class to preprocess the data. It implements two methods related to data preparation and one method that consolidates them.
...
Attributes
----------
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
html : str
Object where useful information is going to be stored in html code
logger : logging.RootLogger
Logger object to do the logging.
Methods
-------
consistency
        This method checks the consistency of the features
check_missing_values
This method handles the missing values based on the method specified
mean and median are supported
"""
def __init__(self,
html,
logger
):
self.html = html
self.logger = logger
def consistency(self, X_train, X_test, features_type):
"""
        This function checks the consistency of the features: qualitative variables with too many categories, a single category, or a high proportion of records in one category. For quantitative variables, it only checks whether any single value accounts for a high proportion of records. Such features will be removed.
Parameters
----------
X_train : pd.DataFrame
Train set information
X_test : pd.DataFrame
Test set information
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
Return
------
X_train : pd.DataFrame
Train set information
X_test : pd.DataFrame
Test set information
features_type : dict[str : list[str]]
Dictionary that contains two keys: qualitatives and quantitatives. The values are the list of features names respectively.
"""
X_train_c = X_train.copy()
X_test_c = X_test.copy()
if not self.html:
self.html = """<html><head>"""
#self.html += """<link rel = "stylesheet" href = "style.css"/>"""
self.html += """</head><body><h1><center>Processing Report</center></h1>"""
if not self.logger:
self.logger = log('../data/output/', 'logs.txt')
self.html += "<h2><center>Features' Consistency:</center></h2>"
# max categories to keep in features with many categories
max_cat = 10
vars_remove = []
vars_remove_quali = []
vars_remove_quanti = []
self.logger.info('Started to check the features consistency')
self.html += "<h3>Qualitative features removed:</h3>"
for x in features_type['qualitative']:
freq = X_train_c[x].value_counts(normalize = True)
freq_acum = np.cumsum(freq)
# features with many categories
if len(freq_acum) > max_cat:
# check whether the first max_cat - 1 categories cover enough of the records
# the remaining categories will be recoded as 'other'
if freq_acum.iloc[max_cat - 1] >= 0.75:
keep_cat = freq_acum.iloc[:(max_cat - 1)].index
X_train_c[x] = np.where(X_train_c[x].isin(keep_cat), X_train_c[x], 'other')
self.logger.info('feature: ' + x + ' re-categorized')
else:
vars_remove_quali.append(x)
freq_acum = pd.DataFrame(freq_acum)
"""
A script to summarize data saved in the `data/furnmove_evaluations__test/` directory,
as a result of downloading data or running evaluation using the script:
`rl_multi_agent/scripts/run_furnmove_or_furnlift_evaluations.py`
Set the metrics, methods, and dataset split, and generate a CSV-styled table of metrics and confidence
intervals. In particular, choose which of the following method groups to summarize:
* grid_vision_furnmove: Four prominent methods (no comm, marginal, SYNC, central) for gridworld and
visual environments.
* grid_3agents: Marginal, SYNC, and central methods for three agent gridworld-FurnMove setting.
* vision_mixtures: Effect of number of mixture components m on SYNC’s performance (in FurnMove)
* vision_cl_ablation: Effect of CORDIAL (`cl`) on marginal, SYNC, and central methods.
* vision_3agents: Marginal, SYNC, and central methods for three agent FurnMove setting.
Run using command:
`python rl_multi_agent/analysis/summarize_furnmove_eval_results.py`
Also, see `rl_multi_agent/analysis/summarize_furnlift_eval_results.py` for a similar script for
FurnLift task.
"""
import glob
import json
import os
from collections import defaultdict
import numpy as np
import pandas as pd
from constants import ABS_PATH_TO_DATA_DIR
from rl_multi_agent.analysis.summarize_furnlift_eval_results import create_table
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
# coding=utf-8
# utilfuncs.py
# ======================================
# This file contains all functions that
# might be shared among different files
# in qteasy.
# ======================================
import numpy as np
import pandas as pd
import sys
import qteasy
from pandas import Timestamp
from datetime import datetime
TIME_FREQ_STRINGS = ['TICK',
'T',
'MIN',
'H',
'D', '5D', '10D', '20D',
'W',
'M',
'Q',
'Y']
PROGRESS_BAR = {0: '----------------------------------------', 1: '#---------------------------------------',
2: '##--------------------------------------', 3: '###-------------------------------------',
4: '####------------------------------------', 5: '#####-----------------------------------',
6: '######----------------------------------', 7: '#######---------------------------------',
8: '########--------------------------------', 9: '#########-------------------------------',
10: '##########------------------------------', 11: '###########-----------------------------',
12: '############----------------------------', 13: '#############---------------------------',
14: '##############--------------------------', 15: '###############-------------------------',
16: '################------------------------', 17: '#################-----------------------',
18: '##################----------------------', 19: '###################---------------------',
20: '####################--------------------', 21: '#####################-------------------',
22: '######################------------------', 23: '#######################-----------------',
24: '########################----------------', 25: '#########################---------------',
26: '##########################--------------', 27: '###########################-------------',
28: '############################------------', 29: '#############################-----------',
30: '##############################----------', 31: '###############################---------',
32: '################################--------', 33: '#################################-------',
34: '##################################------', 35: '###################################-----',
36: '####################################----', 37: '#####################################---',
38: '######################################--', 39: '#######################################-',
40: '########################################'
}
def mask_to_signal(lst):
"""将持仓蒙板转化为交易信号.
转换的规则为比较前后两个交易时间点的持仓比率,如果持仓比率提高,
则产生相应的补仓买入信号;如果持仓比率降低,则产生相应的卖出信号将仓位降低到目标水平。
生成的信号范围在(-1, 1)之间,负数代表卖出,正数代表买入,且具体的买卖信号signal意义如下:
signal > 0时,表示用总资产的 signal * 100% 买入该资产, 如0.35表示用当期总资产的35%买入该投资产品,如果
现金总额不足,则按比例调降买入比率,直到用尽现金。
signal < 0时,表示卖出本期持有的该资产的 signal * 100% 份额,如-0.75表示当期应卖出持有该资产的75%份额。
signal = 0时,表示不进行任何操作
input:
:param lst,ndarray,持仓蒙板
return: =====
op,ndarray,交易信号矩阵
"""
np.seterr(divide='ignore', invalid='ignore')
if lst.ndim == 2: # if the input signal is 2D, operate row by row (axis=0)
# Compare the position ratio at each trading time point with the previous one; a positive
# difference is used directly as a buy signal, e.g. previous 0.35, current 0.7: buy signal 0.35,
# i.e. buy with 35% of total assets to raise the position to 70%
op = (lst - np.roll(lst, shift=1, axis=0))
# A negative difference is divided by the previous position ratio to give the sell-signal strength,
# e.g. previous 0.7, current 0.35: (0.35 - 0.7) / 0.7 = -0.5, i.e. sell 50% of the holding, reducing the position from 70% to 35%
op = np.where(op < 0, (op / np.roll(lst, shift=1, axis=0)), op)
# fix the first row, which the wrap-around difference calculation leaves invalid
# print(f'creating operation signals, first signal is {lst[0]}')
op[0] = lst[0]
else: # if the input signal is 3D, do the same row-wise operation, but along a different axis than in the 2D case (axis=1)
op = (lst - np.roll(lst, shift=1, axis=1))
op = np.where(op < 0, (op / np.roll(lst, shift=1, axis=1)), op)
op[:, 0, :] = lst[:, 0, :]
return op.clip(-1, 1)
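# Editorial usage sketch (not part of the original qteasy module): illustrates the 2D
# branch of mask_to_signal(); the expected values follow from the rules described above.
def _mask_to_signal_example():
    mask = np.array([[0.00], [0.35], [0.70], [0.35]])
    signals = mask_to_signal(mask)
    # expected: [[0.0], [0.35], [0.35], [-0.5]]
    # the first signal equals the initial position ratio, then two 35% buys, then a sell of 50% of the holding
    return signals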
def unify(arr):
"""调整输入矩阵每一行的元素,通过等比例缩小(或放大)后使得所有元素的和为1
example:
unify([[3.0, 2.0, 5.0], [2.0, 3.0, 5.0]])
=
[[0.3, 0.2, 0.5], [0.2, 0.3, 0.5]]
:param arr: type: np.ndarray
:return: ndarray
"""
if isinstance(arr, np.ndarray):
s = arr.sum(1)
shape = (s.shape[0], 1)
return arr / s.reshape(shape)
if isinstance(arr, (int, float)):
return arr
raise TypeError(f'Input should be ndarray! got {type(arr)}')
def time_str_format(t: float, estimation: bool = False, short_form: bool = False):
""" 将int或float形式的时间(秒数)转化为便于打印的字符串格式
:param t: 输入时间,单位为秒
:param estimation:
:param short_form: 时间输出形式,默认为False,输出格式为"XX hour XX day XX min XX sec", 为True时输出"XXD XXH XX'XX".XXX"
:return:
"""
assert isinstance(t, float), f'TypeError: t should be a float number, got {type(t)}'
assert t >= 0, f'ValueError, t should be greater than 0, got minus number'
# debug
# print(f'time input is {t}')
str_element = []
enough_accuracy = False
if t >= 86400 and not enough_accuracy:
if estimation:
enough_accuracy = True
days = t // 86400
t = t - days * 86400
str_element.append(str(int(days)))
if short_form:
str_element.append('D')
else:
str_element.append('days ')
if t >= 3600 and not enough_accuracy:
if estimation:
enough_accuracy = True
hours = t // 3600
t = t - hours * 3600
str_element.append(str(int(hours)))
if short_form:
str_element.append('H')
else:
str_element.append('hrs ')
if t >= 60 and not enough_accuracy:
if estimation:
enough_accuracy = True
minutes = t // 60
t = t - minutes * 60
str_element.append(str(int(minutes)))
if short_form:
str_element.append('\'')
else:
str_element.append('min ')
if t >= 1 and not enough_accuracy:
if estimation:
enough_accuracy = True
seconds = np.floor(t)
t = t - seconds
str_element.append(str(int(seconds)))
if short_form:
str_element.append('\"')
else:
str_element.append('s ')
if not enough_accuracy:
milliseconds = np.round(t * 1000, 1)
if short_form:
str_element.append(f'{int(np.round(milliseconds)):03d}')
else:
str_element.append(str(milliseconds))
str_element.append('ms')
return ''.join(str_element)
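# Editorial usage sketch (not part of the original qteasy module): the same duration
# rendered in the long and the short output form of time_str_format().
def _time_str_format_example():
    duration = 90061.5  # 1 day, 1 hour, 1 minute, 1.5 seconds
    long_form = time_str_format(duration)                    # -> '1days 1hrs 1min 1s 500.0ms'
    short_form = time_str_format(duration, short_form=True)  # -> '1D1H1\'1"500ms'
    return long_form, short_form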
def list_or_slice(unknown_input: [slice, int, str, list], str_int_dict):
""" 将输入的item转化为slice或数字列表的形式,用于生成HistoryPanel的数据切片:
1,当输入item为slice时,直接返回slice
2 输入数据为string, 根据string的分隔符类型确定选择的切片:
2.1, 当字符串不包含分隔符时,直接输出对应的单片数据, 如'close'输出为[0]
2.2, 当字符串以逗号分隔时,输出每个字段对应的切片,如'close,open', 输出[0, 2]
2.3, 当字符串以冒号分割时,输出第一个字段起第二个字段止的切片,如'close:open',输出[0:2] -> [0,1,2]
3 输入数据为列表时,检查列表元素的类型(不支持混合数据类型的列表如['close', 1, True]):
3.1 如果列表元素为string,输出每个字段名对应的列表编号,如['close','open'] 输出为 [0,2]
3.2 如果列表元素为int时,输出对应的列表编号,如[0,1,3] 输出[0,1,3]
3.3 如果列表元素为boolean时,输出True对应的切片编号,如[True, True, False, False] 输出为[0,1]
4 输入数据为int型时,输出相应的切片,如输入0的输出为[0]
:param unknown_input: slice or int/str or list of int/string
:param str_int_dict: a dictionary that contains strings as keys and integer as values
:return:
a list of slice/list that can be used to slice the Historical Data Object
"""
if isinstance(unknown_input, slice):
return unknown_input # slice object can be directly used
elif isinstance(unknown_input, int): # number should be converted to a list containing itself
return np.array([unknown_input])
elif isinstance(unknown_input, str): # string should be converted to numbers
string_input = unknown_input
if string_input.find(',') > 0:
string_list = str_to_list(input_string=string_input, sep_char=',')
res = [str_int_dict[string] for string in string_list]
return np.array(res)
elif string_input.find(':') > 0:
start_end_strings = str_to_list(input_string=string_input, sep_char=':')
start = str_int_dict[start_end_strings[0]]
end = str_int_dict[start_end_strings[1]]
if start > end:
start, end = end, start
return np.arange(start, end + 1)
else:
return [str_int_dict[string_input]]
elif isinstance(unknown_input, list):
is_list_of_str = isinstance(unknown_input[0], str)
is_list_of_int = isinstance(unknown_input[0], int)
is_list_of_bool = isinstance(unknown_input[0], bool)
if is_list_of_bool:
return np.array(list(str_int_dict.values()))[unknown_input]
else:
# convert all items into a number:
if is_list_of_str:
res = [str_int_dict[list_item] for list_item in unknown_input]
elif is_list_of_int:
res = [list_item for list_item in unknown_input]
else:
return None
return np.array(res)
else:
return None
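# Editorial usage sketch (not part of the original qteasy module): how the different
# input types of list_or_slice() map to column indices for a given column dictionary.
def _list_or_slice_example():
    columns = {'close': 0, 'open': 1, 'high': 2, 'low': 3}
    by_names = list_or_slice('close,high', columns)               # -> array([0, 2])
    by_range = list_or_slice('close:high', columns)               # -> array([0, 1, 2])
    by_mask = list_or_slice([True, False, True, False], columns)  # -> array([0, 2])
    return by_names, by_range, by_mask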
def labels_to_dict(input_labels: [list, str], target_list: [list, range]) -> dict:
""" 给target_list中的元素打上标签,建立标签-元素序号映射以方便通过标签访问元素
根据输入的参数生成一个字典序列,这个字典的键为input_labels中的内容,值为一个[0~N]的range,且N=target_list中的元素的数量
这个函数生成的字典可以生成一个适合快速访问的label与target_list中的元素映射,使得可以快速地通过label访问列表中的元素
例如,列表target_list 中含有三个元素,分别是[100, 130, 170]
现在输入一个label清单,作为列表中三个元素的标签,分别为:['first', 'second', 'third']
使用labels_to_dict函数生成一个字典ID如下:
ID: {'first' : 0
'second': 1
'third' : 2}
通过这个字典,可以容易且快速地使用标签访问target_list中的元素:
target_list[ID['first']] == target_list[0] == 100
本函数对输入的input_labels进行合法性检查,确保input_labels中没有重复的标签,且标签的数量与target_list相同
:param input_labels: 输入标签,可以接受两种形式的输入:
字符串形式: 如: 'first,second,third'
列表形式,如: ['first', 'second', 'third']
:param target_list: 需要进行映射的目标列表
:return:
"""
if isinstance(input_labels, str):
input_labels = str_to_list(input_string=input_labels)
unique_count = len(set(input_labels))
assert len(input_labels) == unique_count, \
f'InputError, label duplicated, count of target list is {len(target_list)},' \
f' got {unique_count} unique labels only.'
assert unique_count == len(target_list), \
f'InputError, length of input labels does not equal to that of target list, expect ' \
f'{len(target_list)}, got {unique_count} unique labels instead.'
return dict(zip(input_labels, range(len(target_list))))
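# Editorial usage sketch (not part of the original qteasy module): the mapping built by
# labels_to_dict() and how it is used to access target_list elements by label.
def _labels_to_dict_example():
    target_list = [100, 130, 170]
    ids = labels_to_dict('first,second,third', target_list)
    # ids == {'first': 0, 'second': 1, 'third': 2}
    return target_list[ids['first']]  # -> 100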
def str_to_list(input_string, sep_char: str = ','):
"""将逗号或其他分割字符分隔的字符串序列去除多余的空格后分割成字符串列表,分割字符可自定义"""
assert isinstance(input_string, str), f'InputError, input is not a string!, got {type(input_string)}'
if input_string == "":
return list()
res = input_string.replace(' ', '').split(sep_char)
return res
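# Editorial usage sketch (not part of the original qteasy module): spaces are stripped
# before str_to_list() splits on the separator character.
def _str_to_list_example():
    return str_to_list('close, open , high')  # -> ['close', 'open', 'high']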
# TODO: this function can be merged with str_to_list()
def input_to_list(pars: [str, int, list], dim: int, padder=None):
"""将输入的参数转化为List,同时确保输出的List对象中元素的数量至少为dim,不足dim的用padder补足
input:
:param pars,需要转化为list对象的输出对象
:param dim,需要生成的目标list的元素数量
:param padder,当元素数量不足的时候用来补充的元素
return: =====
items, list 转化好的元素清单
"""
if isinstance(pars, (str, int, np.int64)): # handle scalar (string or integer) input
# print 'type of types', type(items)
pars = [pars] * dim
else:
pars = list(pars) # normal case: convert the input into a list
par_dim = len(pars)
# if the two inputs have inconsistent lengths, pad with padder or ignore the extra part
if par_dim < dim:
pars.extend([padder] * (dim - par_dim))
return pars
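# Editorial usage sketch (not part of the original qteasy module): scalars are repeated
# dim times, short lists are padded up to dim with padder.
def _input_to_list_example():
    repeated = input_to_list('d', 3)             # -> ['d', 'd', 'd']
    padded = input_to_list([1, 2], 4, padder=0)  # -> [1, 2, 0, 0]
    return repeated, padded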
def regulate_date_format(date_str: [str, object]) -> str:
""" tushare的财务报表函数只支持YYYYMMDD格式的日期,因此需要把YYYY-MM-DD及YYYY/MM/DD格式的日期转化为YYYYMMDD格式
:param date_str:
:return:
"""
try:
date_time = pd.to_datetime(date_str)
return date_time.strftime('%Y%m%d')
except:
raise ValueError(f'Input string {date_str} can not be converted to a time format')
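# Editorial usage sketch (not part of the original qteasy module): common date formats
# are normalized to the YYYYMMDD form expected by tushare.
def _regulate_date_format_example():
    return regulate_date_format('2020-05-01'), regulate_date_format('2020/5/1')
    # -> ('20200501', '20200501')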
def list_to_str_format(str_list: [list, str]) -> str:
""" tushare的财务报表函数只支持逗号分隔值的字符串形式作为ts_code或fields等字段的输入,如果输入是list[str]类型,则需要转换
将list型数据转变为str类型,如
['close', 'open', 'high', 'low'] -> 'close, open, high, low'
:param str_list: type: list[str]
:return: string
"""
assert isinstance(str_list, (list, str)), f'TypeError: expect list[str] or str type, got {type(str_list)} instead'
if isinstance(str_list, str):
str_list = str_list.split(' ')
res = ''.join([item.replace(' ', '') + ',' for item in str_list if isinstance(item, str)])
return res[0:-1]
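# Editorial usage sketch (not part of the original qteasy module): both list and
# space-separated string inputs end up as a comma-separated string.
def _list_to_str_format_example():
    from_list = list_to_str_format(['close', 'open', 'high', 'low'])  # -> 'close,open,high,low'
    from_str = list_to_str_format('close open')                       # -> 'close,open'
    return from_list, from_str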
def progress_bar(prog: int, total: int = 100, comments: str = '', short_form: bool = False):
"""根据输入的数字生成进度条字符串并刷新
:param prog: 当前进度,用整数表示
:param total: 总体进度,默认为100
:param comments: 需要显示在进度条中的文字信息
:param short_form: 显示
"""
if total > 0:
if prog > total:
prog = total
progress_str = f'\r \rProgress: [{PROGRESS_BAR[int(prog / total * 40)]}]' \
f' {prog}/{total}. {np.round(prog / total * 100, 1)}% {comments}'
sys.stdout.write(progress_str)
sys.stdout.flush()
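# Editorial usage sketch (not part of the original qteasy module): renders a 40-character
# bar at 50% with a short status message on stdout.
def _progress_bar_example():
    progress_bar(50, 100, 'reading history data')
    # prints: Progress: [####################--------------------] 50/100. 50.0% reading history data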
def maybe_trade_day(date):
""" 判断一个日期是否交易日(或然判断,只剔除明显不是交易日的日期)
准确率有限但是效率高
:param date:
:type date: obj datetime-like 可以转化为时间日期格式的字符串或其他类型对象
:return:
"""
# public_holidays is a tuple of two lists holding the market-closure public holidays: the first list gives the months, the second the days
public_holidays = ([1, 1, 1, 4, 4, 4, 5, 5, 5, 10, 10, 10, 10, 10, 10, 10],
[1, 2, 3, 3, 4, 5, 1, 2, 3, 1, 2, 3, 4, 5, 6, 7])
try:
date = pd.to_datetime(date)
except:
raise TypeError('date is not a valid date time format, cannot be converted to timestamp')
if date.weekday() > 4:
return False
for m, d in zip(public_holidays[0], public_holidays[1]):
if date.month == m and date.day == d:
return False
return True
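# Editorial usage sketch (not part of the original qteasy module): a regular Monday is
# accepted as a probable trading day, while the National Day holiday (Oct 1) is rejected.
def _maybe_trade_day_example():
    return maybe_trade_day('2021-03-01'), maybe_trade_day('2021-10-01')
    # -> (True, False)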
def prev_trade_day(date):
""" 找到一个日期的前一个或然交易日
:param date:
:return:
"""
if maybe_trade_day(date):
return date
else:
d = pd.to_datetime(date)
prev = d - pd.Timedelta(1, 'd')
while not maybe_trade_day(prev):
prev = prev - pd.Timedelta(1, 'd')
return prev
def next_trade_day(date):
""" 返回一个日期的下一个或然交易日
:param date:
:return:
"""
if maybe_trade_day(date):
return date
else:
d = pd.to_datetime(date)
next = d + pd.Timedelta(1, 'd')
while not maybe_trade_day(next):
next = next + pd.Timedelta(1, 'd')
return next
def is_market_trade_day(date, exchange: str = 'SSE'):
""" 根据交易所发布的交易日历判断一个日期是否是交易日,准确性高但需要读取网络数据,因此效率低
:param date:
:type date: obj datetime-like 可以转化为时间日期格式的字符串或其他类型对象
:param exchange:
:type exchange: str 交易所代码:
SSE: 上交所,
SZSE: 深交所,
CFFEX: 中金所,
SHFE: 上期所,
CZCE: 郑商所,
DCE: 大商所,
INE: 上能源,
IB: 银行间,
XHKG: 港交所
:return:
"""
try:
_date = pd.to_datetime(date)
except Exception as ex:
ex.extra_info = f'{date} is not a valid date time format, cannot be converted to timestamp'
raise
assert _date is not None, f'{date} is not a valid date'
# TODO: "market_trade_day_range" should be made a system-level configuration parameter
if _date < pd.to_datetime('19910101') or _date > pd.to_datetime('20221231'):
return False
if not (isinstance(exchange, str) and exchange in ['SSE',
'SZSE',
'CFFEX',
'SHFE',
'CZCE',
'DCE',
'INE',
'IB',
'XHKG']):
raise TypeError(f'exchange \'{exchange}\' is not a valid input')
non_trade_days = qteasy.tsfuncs.trade_calendar(exchange=exchange, is_open=0)
return _date not in non_trade_days
def prev_market_trade_day(date, exchange='SSE'):
""" 根据交易所发布的交易日历找到它的前一个交易日,准确性高但需要读取网络数据,因此效率较低
:param date:
:type date: obj datetime-like 可以转化为时间日期格式的字符串或其他类型对象
:param exchange:
:type exchange: str 交易所代码:
SSE: 上交所,
SZSE: 深交所,
CFFEX: 中金所,
SHFE: 上期所,
CZCE: 郑商所,
DCE: 大商所,
INE: 上能源,
IB: 银行间,
XHKG: 港交所
:return:
"""
try:
_date = pd.to_datetime(date)
except Exception as ex:
ex.extra_info = f'{date} is not a valid date time format, cannot be converted to timestamp'
raise
assert _date is not None, f'{date} is not a valid date'
if _date < pd.to_datetime('19910101') or _date > pd.to_datetime('20221231'):
return None
if is_market_trade_day(_date, exchange):
return _date
else:
prev = _date - pd.Timedelta(1, 'd')
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from Bio import SeqIO
from collections import Counter
import os
from pathlib import Path
import gzip
from functools import reduce
import re
import sys
def concatenate_shorstacks_and_assign_unique_cluster_ids(list_of_shortstack_dfs,
outfile = "concatenated_shortstacks.tsv"):
"""
Given a list of Shortstack individual dataframes, returns a single dataframe with uniquely numbered clusters.
The final resulting dataframe is written to a tab-separated file
"""
dfs = [pd.read_csv(f, sep = "\t") for f in list_of_shortstack_dfs]
concat_df = pd.concat(dfs) # row-bind the individual dataframes
concat_df['cluster_id'] = np.arange(len(concat_df))
cluster_unique_ids = "cluster_" + concat_df["cluster_id"].astype(str)
# Add cluster_unique_id at the beginning of the dataframe
concat_df.insert(0, "cluster_unique_id", cluster_unique_ids)
# Only one id column required
concat_df = concat_df.drop(['cluster_id'], axis=1)
concat_df.to_csv(outfile, sep="\t", index = False, header = True, na_rep = "NaN")
def check_samples_tsv_file(sample_tsv_file = "config/samples.tsv"):
"""
A function to check the validity of the input sample file provided.
Checks implemented:
1) Checks whether the columns are properly named. If not, the pipeline stops with an explicit assertion error.
column1 => sample
column2 => fastq
column3 => genome
2) Checks whether a dot (.) is present in the 'sample' column of the provided sample file.
If that is the case, stop the pipeline execution and returns an explicit error message.
This is to provide compatibility with multiQC.
"""
# check naming of columns
df = pd.read_csv(sample_tsv_file, sep="\t")
colnames = df.columns.to_list()
assert colnames[0] == "sample", "Your first column in your samples.tsv file should be named 'sample' "
assert colnames[1] == "fastq", "Your second column in your samples.tsv file should be named 'fastq' "
assert colnames[2] == "genome", "Your third column in your samples.tsv file should be named 'genome' "
# check if sample names have a dot inside their name
pattern_to_find = re.compile("\.")
df = df.set_index("sample")
for sample_name in list(df.index):
    if pattern_to_find.search(sample_name):
        sys.exit("Please replace '.' (dots) in your sample names with another character, e.g. '_' (underscore)")
return df
def create_microrna_dataframe_from_shortstack_results(list_of_sample_names,list_of_shortstack_result_files,how="outer",outfile="micrornas.merged.tsv"):
"""
1. Takes a list of shortstack result files and create a list of Pandas dataframes
2. For each dataframe, keep only the microRNAs (MIRNAs == Y)
3. Select the "MajorRNA" and "MajorRNAReads" columns.
4. Rename the MajorRNAReads column using the sample name
5. Perform a recursive outer merge and output a single Pandas dataframe
The option "how" can take either "outer" or "inner" as values
Example of output file:
microrna sample1 sample2
UUUTC 10 20
UUTTC 30 50
...
"""
assert isinstance(list_of_sample_names,list), "You must provide a list containing the sample names"
assert isinstance(list_of_shortstack_result_files,list), "You must provide a list containing paths to ShortStack results files"
# reads each ShortStack result file
dfs = [pd.read_csv(df,sep="\t") for df in list_of_shortstack_result_files]
# filter the dataframes (steps 2 and 3)
dfs_filtered = [df.query("MIRNA == 'Y'") for df in dfs]
dfs_filtered = [df.loc[:,["MajorRNA","MajorRNAReads"]] for df in dfs_filtered]
# rename the MajorRNAReads column (step 4)
dfs_filtered = [df.rename(columns={"MajorRNAReads":sample}) for df,sample in zip(dfs_filtered,list_of_sample_names)]
# perform the recursive merge
if how == "outer":
df_merged = reduce(lambda left,right: pd.merge(
left.drop_duplicates("MajorRNA"),
right.drop_duplicates("MajorRNA"),
on=["MajorRNA"],how="outer"),
dfs_filtered
)
elif how == "inner":
df_merged = reduce(lambda left,right: pd.merge(
left.drop_duplicates("MajorRNA"),
right.drop_duplicates("MajorRNA"),
on=["MajorRNA"],how="inner"),
dfs_filtered
)
else:
print("The argument 'how' only accepts 'outer' or 'inner' to merge the Shortstack results")
return df_merged
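# Editorial usage sketch (not part of the original script): the sample names and
# Results.txt paths below are hypothetical placeholders; they only illustrate the
# expected call signature of the merge helper defined above.
def _merge_shortstack_results_example():
    samples = ["sample1", "sample2"]
    result_files = ["sample1/Results.txt", "sample2/Results.txt"]
    # an outer merge keeps every MajorRNA observed in at least one sample
    return create_microrna_dataframe_from_shortstack_results(samples, result_files, how="outer")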
def collect_clusterfiles_path(path_to_mirna_folder):
"""
Takes the MIRNAs/ folder and returns a list of cluster files from that folder
"""
list_of_cluster_files = [os.path.join(path_to_mirna_folder,f) for f in os.listdir(path_to_mirna_folder)]
return list_of_cluster_files
# defines a function to read the hairpin sequence from one MIRNA cluster file
def extract_hairpin_name_and_sequence(file,sampleName):
"""
Reads one MIRNA cluster file
It returns the corresponding cluster name and hairpin sequence in a Python dictionary
Dictionary keys: cluster names
Dictionary values: cluster sequences
"""
with open(file,"r") as filin:
lines = filin.readlines()
clusterName = lines[0].split(" ")[0].strip()
hairpinSequence = lines[2].strip()
d = {clusterName:hairpinSequence}
return d
# takes the dictionary of sequences generated by extract_hairpin_name_and_sequence
# converts it to a fasta file
def converts_list_of_sequence_dictionary_to_fasta(list_of_sequence_dictionaries,outfastafile):
"""
Takes the dictionary of sequences generated by extract_hairpin_name_and_sequence
and converts it to a fasta file
"""
with open(outfastafile,"w") as fileout:
for sequence_dictionary in list_of_sequence_dictionaries:
for clusterName, hairpinSequence in sequence_dictionary.items():
fileout.write(">" + clusterName + "\n" + hairpinSequence + "\n")
# add blast header to blast result files
def add_blast_header_to_file(blast_file_without_header,blast_file_with_header):
"""takes a blast result file (outformat 6 not customized) and add a header
"""
if os.path.getsize(blast_file_without_header) == 0:
with open(blast_file_with_header, "w") as fileout:
fileout.write("No blast hit. Check the miRBase databases you have used (correct species?).\n")
fileout.write("If you run the pipeline on subset test files, you can ignore this message.")
else:
blast_header = ["qseqid",
"subject_id",
"pct_identity",
"aln_length",
"n_of_mismatches",
"gap_openings",
"q_start",
"q_end",
"s_start",
"s_end",
"e_value",
"bit_score"]
df = pd.read_csv(blast_file_without_header,sep="\t",header=None)
df.columns = blast_header
df.to_csv(blast_file_with_header,sep="\t",header=True,index=False)
def add_sample_name_to_shortstack_results(
path_to_shortstack_results,
sample_name):
"""
Takes a "Results.txt" dataframe as produced by Shortstack.
Add the sample name as a new column (sample name is repeated N times (number of rows))
Returns a pandas dataframe as specified by outfile
"""
df = pd.read_csv(path_to_shortstack_results,sep="\t")
# Generate a nice BUSCO visualization from the BUSCO table
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
def read_busco(tbl: str, name: str) -> pd.DataFrame:
"""
Read a busco summary table into a
dataframe and create relevant columns
"""
df = pd.read_csv(tbl, names=["number", "type"], sep="\t", header=None)
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def stylePlots():
"""A quick function to make subsequent plots look nice (requires seaborn).
"""
sns.set_context("talk",font_scale=1.5)
sns.set_style('white', {'axes.linewidth': 0.5})
plt.rcParams['xtick.major.size'] = 15
plt.rcParams['ytick.major.size'] = 15
plt.rcParams['xtick.minor.size'] = 10
plt.rcParams['ytick.minor.size'] = 10
plt.rcParams['xtick.minor.width'] = 2
plt.rcParams['ytick.minor.width'] = 2
plt.rcParams['xtick.major.width'] = 2
plt.rcParams['ytick.major.width'] = 2
plt.rcParams['xtick.bottom'] = True
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.left'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams.update({
#"text.usetex": True,turn off for now, not sure where my latex went
"text.usetex": False,
"font.family": "serif",
"font.serif": ["Palatino"],
})
def makeCM(model, X_train, X_test, y_train, y_test, encoding_dict, fn='gp_rnn_LSSTexp', ts=000000, c='Reds', plotpath='./plot/'):
"""A custom code to generate a confusion matrix for a classification model (allows for customization past what the built-in
scikit-learn implementation allows).
Parameters
----------
model : keras model object
The classification model to evaluate.
X_train : 2d array-like
Features of training set.
X_test : 2d array-like
Features of test set.
y_train : 1d array-like
Classes of objects in training set.
y_test : 1d array-like
Classes of objects in test set.
fn : str
Prefix to add to output filename
ts : int
The timestamp for the run (used to link plots to verbose output files)
Returns
-------
None
"""
# make predictions
predictions = model.predict(X_test)
transientClasses = np.unique(list(encoding_dict.values()))
predictDF = pd.DataFrame(data=predictions, columns=transientClasses)
#!/usr/bin/env python
"""
I use this script to determine the ratio of measurements of fluxes compared to
the number of temperature measurements for FLUXNET and LaThuille sites.
This is done for latent heat, sensible heat and NEE. I focus on
extreme temperatures (the lower and upper 2.2% of the temperature
distribution of each site).
"""
__author__ = "<NAME>"
__version__ = "1.0 (25.10.2018)"
__email__ = "<EMAIL>"
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import pandas as pd
import glob
import xarray as xr
import os
def main(files_met, files_flux, ofname1, ofname2, ofname3, ofname4, plot_dir):
# empty lists to add data in for the table/figures
results_LH = []
results_SH = []
results_NEE = []
lons = []
lats = []
# creating dataframes for barplots of global temperature distribution
df_temp = pd.DataFrame()
df_LH = pd.DataFrame()
df_SH = pd.DataFrame()
df_NEE = pd.DataFrame()
for m,f in zip(files_met, files_flux):
print(m,f)
ds_met = open_file(m)
ds_flux = open_file(f)
lat = ds_met['latitude'].mean()
lon = ds_met['longitude'].mean()
if len(ds_met) > ((2*365*48)/3) and (len(ds_met[ds_met.Tair_qc < 1])/len(ds_met))>0.5:
(ds_met, ds_flux,
Tair_measured, mean_tair_measured) = screen_data(ds_met, ds_flux)
ppt_yearly = ((ds_met.Rainf.mean())*48*365)
stdev_tair_measured = np.std(Tair_measured.Tair)
# Ignore datasets shorter than 8 months; also account for the total length
# of the dataset and the percentage of measured temperatures.
# Plot the normal distribution of the temperature
plot_normal_dist(Tair_measured, mean_tair_measured, ds_met, plot_dir)
# creating bins of 1 K and assigning a temperature to a bin.
minimum_tair = (min(ds_met.Tair))
maximum_tair = (max(ds_met.Tair))
bins = np.arange(np.floor(minimum_tair),
np.ceil(maximum_tair+1)).astype(int)
bin_label = np.arange(np.floor(minimum_tair),
np.ceil(maximum_tair)).astype(int)
data_binned = pd.cut(ds_met.Tair, bins, labels=bin_label)
#!/usr/bin/env python3
'''
Script to generate figures that demonstrate the throughput improvements with batching.
It either shows speed up in terms of throughput or throughput with and without batching side by side.
Code that shows throughput with and without batching side by side is commented out at the moment.
'''
import getopt
import matplotlib.pyplot as plt
import pandas as pd
import sys
from orm_classes import Base
from sqlalchemy import create_engine
BAR_PLOT_HEIGHTS = 1.5
def plot_hand_implementation_comparison(connection):
df_hand_implementation = pd.read_sql(
'Select sample_application_name, input_size, relative_deadline, worker_wcet, dop, AVG(min_period) AS min_period ' \
'FROM ThroughputWithHandImplementations ' \
'WHERE is_hand_implementation = 1 ' \
'GROUP BY sample_application_name, input_size, relative_deadline, worker_wcet, dop', connection)
df_peso = pd.read_sql(
'Select sample_application_name, input_size, relative_deadline, worker_wcet, dop, AVG(min_period) AS min_period ' \
'From ThroughputWithHandImplementations ' \
'Where is_hand_implementation = 0 ' \
'GROUP BY sample_application_name, input_size, relative_deadline, worker_wcet, dop', connection)
df = df_peso.join(
df_hand_implementation.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'dop']),
lsuffix='_peso',
rsuffix='_hand_implementation',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'dop']
)
df = df.sort_values(['sample_application_name', 'input_size'], ascending=True)
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
sample_application_names = []
peso_throughput = []
hand_impl_throughput = []
for index, row in df.iterrows():
sample_application_names.append((row['sample_application_name'] + '\ni' + str(row['input_size']))
.replace('reduction', 'RED')
.replace('sparse_matrix_vector_multiplication', 'SMV')
.replace('dense_matrix_vector_multiplication', 'DMV'))
peso_throughput.append(row['min_period_peso'] / 1000.0)
hand_impl_throughput.append(row['min_period_hand_implementation'] / 1000.0)
'''for i in range(10):
sample_application_names.append('TBD')
peso_throughput.append(0.1)
hand_impl_throughput.append(0.1)'''
df_to_plot = pd.DataFrame({
'Peso': peso_throughput,
'Hand impl.': hand_impl_throughput},
index=sample_application_names)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_to_plot)
df_to_plot.plot(kind='bar',
figsize=(6.5, BAR_PLOT_HEIGHTS),
edgecolor='none',
color=['#0165fc', '#f1a340'],
legend=True)
ax = plt.axes()
ax.plot([1], [1])
ax.yaxis.grid()
ax.tick_params(axis=u'both', which=u'both', length=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('#6F6F6F')
ax.spines['left'].set_color('#6F6F6F')
ax.set_ylabel('Minimum\nperiod'
' ($\mu$s)')
plt.axis('tight')
plt.ylim([0, 1.7])
plt.yticks([0, 0.85, 1.7])
plt.xlim([-ax.patches[0].get_width(), 5 + ax.patches[0].get_width()])
plt.gcf().subplots_adjust(bottom=0.4) # Make sure the x labels are not cut off
leg = plt.legend()
leg.get_frame().set_linewidth(0.0) # Remove the frame around the legend
plt.legend(bbox_to_anchor=(0.56, 1.3), loc=2, borderaxespad=0., ncol=2, frameon=False)
# plt.show()
plt.savefig('../paper/figures/eval_implementation_overhead.pdf', bbox_inches='tight')
def plot_dop_model_accuracy_experiments(connection):
# Load data from the DB into pandas data frames
df_baseline = pd.read_sql('Select * From DOPModelAccuracySample Where is_oracle = 0 and sample = 1',
connection)
df_oracle = pd.read_sql('Select * From DOPModelAccuracySample Where is_oracle = 1 and sample = 1',
connection)
# Prepare data that will be plotted
df = df_baseline.join(
df_oracle.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']),
lsuffix='_baseline',
rsuffix='_oracle',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']
)
df = df.sort_values(['sample_application_name', 'input_size', 'period'], ascending=True)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df)
# Create data arrays
sample_application_names = []
baseline_dop = []
oracle_dop = []
for index, row in df.iterrows():
sample_application_names.append((row['sample_application_name'] + '\ni' + str(row['input_size']) + ' p' +
str(float(row['period']) / 1000.0).replace('.0', ''))
.replace('reduction', 'RED')
.replace('sparse_matrix_vector_multiplication', 'SMV')
.replace('dense_matrix_vector_multiplication', 'DMV'))
baseline_dop.append(row['dop_baseline'])
oracle_dop.append(row['dop_oracle'])
'''for i in range(5):
sample_application_names.append('TBD')
baseline_dop.append(0.1)
oracle_dop.append(0.1)'''
df_to_plot = pd.DataFrame({'Our analytical framework': baseline_dop,
'Experimental results': oracle_dop},
index=sample_application_names)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(df_to_plot)
df_to_plot.plot(kind='bar',
figsize=(14, BAR_PLOT_HEIGHTS),
edgecolor='none',
color=['#99d594', '#f1a340'],
legend=True)
ax = plt.axes()
ax.plot([1], [1]) # Remove ticks
ax.yaxis.grid() # Show horizontal lines for better readability
ax.tick_params(axis=u'both', which=u'both', length=0) # Remove ticks
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_color('#6F6F6F')
ax.spines['left'].set_color('#6F6F6F')
ax.set_ylabel('Worker\ncore count')
# plt.xticks(rotation='horizontal')
plt.yticks([0, 3, 6])
plt.axis('tight') # Remove margins on the very left and very right
plt.ylim([0, 6])
plt.xlim([-ax.patches[0].get_width(), 17 + ax.patches[0].get_width()])
plt.gcf().subplots_adjust(bottom=0.4) # Make sure the x labels are not cut off
leg = plt.legend()
leg.get_frame().set_linewidth(0.0) # Remove the frame around the legend
plt.legend(bbox_to_anchor=(0.6, 1.3), loc=2, borderaxespad=0., ncol=2, frameon=False)
#plt.show()
plt.savefig('../paper/figures/dop_model_oracle_study.pdf', bbox_inches='tight')
def plot_batch_size_model_accuracy_experiments(connection):
# Load data from the DB into pandas data frames
df_baseline = pd.read_sql('Select * From BatchSizeModelAccuracySample Where is_oracle = 0 and sample = 1',
connection)
df_oracle = pd.read_sql('Select * From BatchSizeModelAccuracySample Where is_oracle = 1 and sample = 1',
connection)
# Prepare data that will be plotted i.e. the oracle and the 'our model' data set
df = df_baseline.join(
df_oracle.set_index(['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']),
lsuffix='_baseline',
rsuffix='_oracle',
on=['sample_application_name',
'input_size',
'relative_deadline',
'worker_wcet',
'period']
)
df = df.sort_values(['sample_application_name', 'input_size', 'period'], ascending=True)
# Debugging output
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    print(df)
import blpapi
import logging
import datetime
import pandas as pd
import contextlib
from collections import defaultdict
from pandas import DataFrame
@contextlib.contextmanager
def bopen(debug=False):
con = BCon(debug=debug)
con.start()
try:
yield con
finally:
con.stop()
class BCon(object):
def __init__(self, host='localhost', port=8194, debug=False):
"""
Create an object which manages connection to the Bloomberg API session
Parameters
----------
host: str
Host name
port: int
Port to connect to
debug: Boolean {True, False}
Boolean corresponding to whether to log Bloomberg Open API request
and response messages to stdout
"""
# Fill SessionOptions
sessionOptions = blpapi.SessionOptions()
sessionOptions.setServerHost(host)
sessionOptions.setServerPort(port)
self._sessionOptions = sessionOptions
# Create a Session
self.session = blpapi.Session(sessionOptions)
# initialize logger
self.debug = debug
@property
def debug(self):
"""
When True, print all Bloomberg Open API request and response messages
to stdout
"""
return self._debug
@debug.setter
def debug(self, value):
"""
Set whether logging is True or False
"""
self._debug = value
root = logging.getLogger()
if self._debug:
# log requests and responses
root.setLevel(logging.DEBUG)
else:
# log only failed connections
root.setLevel(logging.INFO)
def start(self):
"""
start connection and init service for refData
"""
# Start a Session
if not self.session.start():
logging.info("Failed to start session.")
return
self.session.nextEvent()
# Open service to get historical data from
if not self.session.openService("//blp/refdata"):
logging.info("Failed to open //blp/refdata")
return
self.session.nextEvent()
# Obtain previously opened service
self.refDataService = self.session.getService("//blp/refdata")
self.session.nextEvent()
def restart(self):
"""
Restart the blp session
"""
# Recreate a Session
self.session = blpapi.Session(self._sessionOptions)
self.start()
def _create_req(self, rtype, tickers, flds, ovrds, setvals):
# flush event queue in case previous call errored out
while(self.session.tryNextEvent()):
pass
request = self.refDataService.createRequest(rtype)
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
for name, val in setvals:
request.set(name, val)
overrides = request.getElement("overrides")
for ovrd_fld, ovrd_val in ovrds:
ovrd = overrides.appendElement()
ovrd.setElement("fieldId", ovrd_fld)
ovrd.setElement("value", ovrd_val)
return request
def bdh(self, tickers, flds, start_date, end_date, elms=[],
ovrds=[], longdata=False):
"""
Get tickers and fields, return pandas dataframe with column MultiIndex
of tickers and fields if multiple fields given an Index otherwise.
If single field is given DataFrame is ordered same as tickers,
otherwise MultiIndex is sorted
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
elms: list of tuples
List of tuples where each tuple corresponds to the other elements
to be set, e.g. [("periodicityAdjustment", "ACTUAL")]
Refer to A.2.4 HistoricalDataRequest in the Developers Guide for
more info on these values
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
longdata: boolean
Whether data should be returned in long data format or pivoted
"""
elms = list(elms)
data = self._bdh_list(tickers, flds, start_date, end_date,
elms, ovrds)
df = DataFrame(data)
df.columns = ["date", "ticker", "field", "value"]
df.loc[:, "date"] = pd.to_datetime(df.loc[:, "date"])
if not longdata:
cols = ['ticker', 'field']
df = df.set_index(['date'] + cols).unstack(cols)
df.columns = df.columns.droplevel(0)
return df
def _bdh_list(self, tickers, flds, start_date, end_date, elms,
ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
setvals = elms
setvals.append(("startDate", start_date))
setvals.append(("endDate", end_date))
request = self._create_req("HistoricalDataRequest", tickers, flds,
ovrds, setvals)
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
if msg.getElement('securityData').hasElement('securityError') or (msg.getElement('securityData').getElement("fieldExceptions").numValues() > 0): # NOQA
raise Exception(msg)
ticker = msg.getElement('securityData').getElement('security').getValue() # NOQA
fldDatas = msg.getElement('securityData').getElement('fieldData') # NOQA
for fd in fldDatas.values():
dt = fd.getElement('date').getValue()
for element in fd.elements():
fname = str(element.name())
if fname == "date":
continue
val = element.getValue()
data.append((dt, ticker, fname, val))
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref(self, tickers, flds, ovrds=[]):
"""
Make a reference data request, get tickers and fields, return long
pandas Dataframe with columns [ticker, field, value]
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
"""
data = self._ref(tickers, flds, ovrds)
data = DataFrame(data)
data.columns = ["ticker", "field", "value"]
return data
def _ref(self, tickers, flds, ovrds):
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
request = self._create_req("ReferenceDataRequest", tickers, flds,
ovrds, [])
logging.debug("Sending Request:\n %s" % request)
# Send the request
self.session.sendRequest(request)
data = []
# Process received events
while(True):
# We provide timeout to give the chance for Ctrl+C handling:
ev = self.session.nextEvent(500)
for msg in ev:
logging.debug("Message Received:\n %s" % msg)
fldData = msg.getElement('securityData')
for i in range(fldData.numValues()):
ticker = (fldData.getValue(i).getElement("security").getValue()) # NOQA
reqFldsData = (fldData.getValue(i).getElement('fieldData'))
for j in range(reqFldsData.numElements()):
fld = flds[j]
# this is for dealing with requests which return arrays
# of values for a single field
if reqFldsData.getElement(fld).isArray():
lrng = reqFldsData.getElement(fld).numValues()
for k in range(lrng):
elms = (reqFldsData.getElement(fld).getValue(k).elements()) # NOQA
# if the elements of the array have multiple
# subelements this will just append them all
# into a list
for elm in elms:
data.append([ticker, fld, elm.getValue()])
else:
val = reqFldsData.getElement(fld).getValue()
data.append([ticker, fld, val])
if ev.eventType() == blpapi.Event.RESPONSE:
# Response completely received, so we could exit
break
return data
def ref_hist(self, tickers, flds, start_date,
end_date=datetime.date.today().strftime('%Y%m%d'),
timeout=2000, longdata=False):
"""
Get tickers and fields, periodically override REFERENCE_DATE to create
a time series. Return pandas dataframe with column MultiIndex
of tickers and fields if multiple fields given, Index otherwise.
If single field is given DataFrame is ordered same as tickers,
otherwise MultiIndex is sorted
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
start_date: string
String in format YYYYmmdd
end_date: string
String in format YYYYmmdd
timeout: int
Passed into nextEvent(timeout), number of milliseconds before
timeout occurs
"""
# correlationIDs should be unique to a session so rather than
# managing unique IDs for the duration of the session just restart
# a session for each call
self.restart()
if type(tickers) is not list:
tickers = [tickers]
if type(flds) is not list:
flds = [flds]
# Create and fill the request for the historical data
request = self.refDataService.createRequest("ReferenceDataRequest")
for t in tickers:
request.getElement("securities").appendValue(t)
for f in flds:
request.getElement("fields").appendValue(f)
overrides = request.getElement("overrides")
dates = pd.date_range(start_date, end_date, freq='b')
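# Editorial usage sketch (not part of the original module): ticker and field names are
# illustrative only, and a reachable Bloomberg session on localhost:8194 is assumed.
def _bcon_usage_example():
    with bopen() as con:
        hist = con.bdh('SPY US Equity', 'PX_LAST', '20150629', '20150703')
        ref = con.ref('AUDUSD Curncy', 'SETTLE_DT')
    return hist, ref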
import pandas as pd
import numpy as np
import pickle
import os
import re
def pkl_to_df(model_dir):
''' Perform ETL on all models in a specified directory (stored as .pkl files) and transform them into a tidy-format DataFrame. Training loss is averaged over chunks to a per-epoch level.'''
try:
os.chdir(os.path.expanduser(model_dir))
except FileNotFoundError as f:
print("Wrong directory? Can't cd to ", model_dir)
exit(1)
# first, grab the models, turn the .pkl files into objects, and get the data
model_files = [f for f in os.listdir('.') if f.endswith('.pkl')]
model_names = [f.split('.')[0] for f in model_files]
models = [pickle.load(open(f,'rb')) for f in model_files]
# next, turn this collection of python objects into a handful of tidy data pandas dfs
dfs_auroc = [pd.DataFrame.from_items([('model',[name for i in range(len(model['losses_valid_auc']))]),('epoch',[i for i in range(len(model['losses_valid_auc']))]), ('measure', ['validation AUROC' for i in range(len(model['losses_valid_auc']))]),('score', model['losses_valid_auc'])]) for name, model in zip(model_names, models)]
df_auroc = pd.concat(dfs_auroc)
dfs_vloss = [pd.DataFrame.from_items([('model',[name for i in range(len(model['losses_valid_xent']))]),('epoch',[i for i in range(len(model['losses_valid_xent']))]),('measure', ['validation Xent loss' for i in range(len(model['losses_valid_xent']))]),('score', model['losses_valid_xent'])]) for name, model in zip(model_names, models)]
df_vloss = pd.concat(dfs_vloss)
dfs_aupr = [pd.DataFrame.from_items([('model',[name for i in range(len(model['losses_valid_aupr']))]),('epoch',[i for i in range(len(model['losses_valid_aupr']))]),('measure',['validation AUPR' for i in range(len(model['losses_valid_aupr']))]),('score', model['losses_valid_aupr'])]) for name, model in zip(model_names, models)]
df_aupr = pd.concat(dfs_aupr)
from __future__ import print_function
import os
import pandas as pd
import pymongo
import datetime
import socket
import json
import hashlib
import getpass
from baseline.utils import export, listify
from xpctl.core import ExperimentRepo, store_model
from bson.objectid import ObjectId
from baseline.version import __version__
from xpctl.helpers import order_json, df_get_results, df_experimental_details, get_experiment_label
__all__ = []
exporter = export(__all__)
@exporter
class MongoRepo(ExperimentRepo):
def __init__(self, host, port, user, passw):
super(MongoRepo, self).__init__()
self.dbhost = host
if user and passw:
uri = "mongodb://{}:{}@{}:{}/test".format(user, passw, host, port)
client = pymongo.MongoClient(uri)
else:
client = pymongo.MongoClient(host, port)
if client is None:
s = "can not connect to mongo at host: [{}], port [{}], username: [{}], password: [{}]".format(host,
port,
user,
passw)
raise Exception(s)
try:
dbnames = client.database_names()
except pymongo.errors.ServerSelectionTimeoutError:
raise Exception("can not get database from mongo at host: {}, port {}, connection timed out".format(host,
port))
if "reporting_db" not in dbnames:
raise Exception("no database for results found")
self.db = client.reporting_db
def put_result(self, task, config_obj, events_obj, **kwargs):
now = datetime.datetime.utcnow().isoformat()
train_events = list(filter(lambda x: x['phase'] == 'Train', events_obj))
valid_events = list(filter(lambda x: x['phase'] == 'Valid', events_obj))
test_events = list(filter(lambda x: x['phase'] == 'Test', events_obj))
checkpoint_base = kwargs.get('checkpoint_base', None)
checkpoint_store = kwargs.get('checkpoint_store', None)
print_fn = kwargs.get('print_fn', print)
hostname = kwargs.get('hostname', socket.gethostname())
username = kwargs.get('username', getpass.getuser())
config_sha1 = hashlib.sha1(json.dumps(order_json(config_obj)).encode('utf-8')).hexdigest()
label = get_experiment_label(config_obj, task, **kwargs)
post = {
"config": config_obj,
"train_events": train_events,
"valid_events": valid_events,
"test_events": test_events,
"username": username,
"hostname": hostname,
"date": now,
"label": label,
"sha1": config_sha1,
"version": __version__
}
if checkpoint_base:
model_loc = store_model(checkpoint_base, config_sha1, checkpoint_store)
if model_loc is not None:
post.update({"checkpoint": "{}:{}".format(hostname, os.path.abspath(model_loc))})
else:
print_fn("model could not be stored, see previous errors")
if task in self.db.collection_names():
print_fn("updating results for existing task [{}] in host [{}]".format(task, self.dbhost))
else:
print_fn("creating new task [{}] in host [{}]".format(task, self.dbhost))
coll = self.db[task]
result = coll.insert_one(post)
print_fn("results updated, the new results are stored with the record id: {}".format(result.inserted_id))
return result.inserted_id
def has_task(self, task):
return task in self.get_task_names()
def put_model(self, id, task, checkpoint_base, checkpoint_store, print_fn=print):
coll = self.db[task]
query = {'_id': ObjectId(id)}
projection = {'sha1': 1}
results = list(coll.find(query, projection))
if not results:
print_fn("no sha1 for the given id found, returning.")
return False
sha1 = results[0]['sha1']
model_loc = store_model(checkpoint_base, sha1, checkpoint_store, print_fn)
if model_loc is not None:
coll.update_one({'_id': ObjectId(id)}, {'$set': {'checkpoint': model_loc}}, upsert=False)
return model_loc
def get_label(self, id, task):
coll = self.db[task]
label = coll.find_one({'_id': ObjectId(id)}, {'label': 1})["label"]
return label
def rename_label(self, id, task, new_label):
coll = self.db[task]
prev_label = coll.find_one({'_id': ObjectId(id)}, {'label': 1})["label"]
coll.update({'_id': ObjectId(id)}, {'$set': {'label': new_label}}, upsert=False)
changed_label = coll.find_one({'_id': ObjectId(id)}, {'label': 1})["label"]
return prev_label, changed_label
def rm(self, id, task, print_fn=print):
coll = self.db[task]
prev = coll.find_one({'_id': ObjectId(id)}, {'label': 1})
if prev is None:
return False
model_loc = self.get_model_location(id, task)
if model_loc is not None and os.path.exists(model_loc):
os.remove(model_loc)
else:
print_fn("No model stored for this record. Only purging the database.")
coll.remove({'_id': ObjectId(id)})
assert coll.find_one({'_id': ObjectId(id)}) is None
print_fn("record {} deleted successfully from database {}".format(id, task))
return True
def _get_metrics(self, xs, event_type):
keys = []
for x in xs:
if x[event_type]:
for k in x[event_type][0].keys():
keys.append(k)
keys = set(keys)
if 'tick_type' in keys:
keys.remove("tick_type")
if 'tick' in keys:
keys.remove("tick")
if 'phase' in keys:
keys.remove("phase")
return keys
def _generate_data_frame(self, coll, metrics, query, projection, event_type):
all_results = list(coll.find(query, projection))
if not all_results:
return pd.DataFrame()
results = []
ms = list(set(metrics)) if len(metrics) > 0 else list(self._get_metrics(all_results, event_type))
for result in all_results: # different experiments
for index in range(len(result[event_type])): # train_event epoch 0,
# train_event epoch 1 etc, for event_type = test_event, there is only one event
data = []
for metric in ms:
data.append(result[event_type][index][metric])
results.append(
[result['_id'], result['username'], result['label'], result['config']['dataset'], result.get('sha1'),
result['date']] + data)
return pd.DataFrame(results, columns=['id', 'username', 'label', 'dataset', 'sha1', 'date'] + ms)
def _update_query(self, q, **kwargs):
query = q
if not kwargs:
return query
else:
if "username" in kwargs and kwargs["username"]:
query.update({"username": {"$in": list(kwargs["username"])}})
if "dataset" in kwargs:
query.update({"config.dataset": kwargs["dataset"]})
if "sha1" in kwargs:
query.update({"sha1": kwargs["sha1"]})
return query
def _update_projection(self, event_type):
projection = {"_id": 1, "sha1": 1, "label": 1, "username": 1, "config.dataset": 1, "date": 1}
projection.update({event_type: 1})
return projection
def experiment_details(self, user, metric, sort, task, event_type, sha1, n):
metrics = listify(metric)
coll = self.db[task]
users = listify(user)
query = self._update_query({}, username=users, sha1=sha1)
projection = self._update_projection(event_type=event_type)
result_frame = self._generate_data_frame(coll, metrics=metrics, query=query, projection=projection, event_type=event_type)
return df_experimental_details(result_frame, sha1, users, sort, metric, n)
def get_results(self, task, dataset, event_type, num_exps=None, num_exps_per_config=None, metric=None, sort=None):
metrics = listify(metric)
coll = self.db[task]
query = self._update_query({}, dataset=dataset)
projection = self._update_projection(event_type=event_type)
result_frame = self._generate_data_frame(coll, metrics=metrics, query=query, projection=projection, event_type=event_type)
if not result_frame.empty:
return df_get_results(result_frame, dataset, num_exps, num_exps_per_config, metric, sort)
return None
def config2dict(self, task, sha1):
coll = self.db[task]
j = coll.find_one({"sha1": sha1}, {"config": 1})["config"]
if not j:
return None
else:
return j
def get_task_names(self):
return self.db.collection_names()
def get_model_location(self, id, task):
coll = self.db[task]
query = {'_id': ObjectId(id)}
projection = {'checkpoint': 1}
results = [x.get('checkpoint', None) for x in list(coll.find(query, projection))]
results = [x for x in results if x]
if not results:
return None
return results[0]
def get_info(self, task, event_type):
coll = self.db[task]
q = {}
p = {'config.dataset': 1}
datasets = list(set([x['config']['dataset'] for x in list(coll.find(q, p))]))
store = []
for dataset in datasets:
q = self._update_query({}, dataset=dataset)
p = self._update_projection(event_type)
results = list(coll.find(q, p))
for result in results: # different experiments
store.append([result['username'], result['config']['dataset'], task])
df = pd.DataFrame(store, columns=['user', 'dataset', 'task'])
import os
os.chdir('MERFISH_Moffit/')
import numpy as np
import pandas as pd
import pickle
import matplotlib
matplotlib.use('qt5agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
import matplotlib.pyplot as plt
import scipy.stats as st
with open ('data/SpaGE_pkl/MERFISH.pkl', 'rb') as f:
datadict = pickle.load(f)
MERFISH_data = datadict['MERFISH_data']
del datadict
with open ('data/SpaGE_pkl/Moffit_RNA.pkl', 'rb') as f:
datadict = pickle.load(f)
RNA_data = datadict['RNA_data']
del datadict
Gene_Order = np.intersect1d(MERFISH_data.columns,RNA_data.columns)
### SpaGE
SpaGE_imputed = pd.read_csv('Results/SpaGE_LeaveOneOut.csv',header=0,index_col=0,sep=',')
"""
@Author : TeJas.Lotankar
Description:
------------
Utils and helper functions for Action Recognition.
"""
# imports
import mxnet as mx
from matplotlib import pyplot as plt
from gluoncv import model_zoo, data, utils
from PIL import Image
import cv2
import numpy as np
import pandas as pd
from mxnet import gluon, nd, image
from mxnet.gluon.data.vision import transforms
from gluoncv.data.transforms import video
from gluoncv.model_zoo import get_model
from gluoncv.utils.filesystem import try_import_decord
def get_action_recognition(video_obj, model_arch = "slowfast_4x16_resnet50_kinetics400"):
'''
//TODO
'''
# starting decord
decord = try_import_decord()
net = get_model(model_arch, pretrained=True)
try:
video_obj = utils.download(video_obj)
except ValueError:
pass
vr = decord.VideoReader(video_obj)
if "slowfast" in model_arch:
fast_frame_id_list = range(0, 64, 2)
slow_frame_id_list = range(0, 64, 16)
frame_id_list = list(fast_frame_id_list) + list(slow_frame_id_list)
else:
frame_id_list = range(0, 64, 2)
print("=========Reached here============")
video_data = vr.get_batch(frame_id_list).asnumpy()
clip_input = [video_data[vid, :, :, :] for vid, _ in enumerate(frame_id_list)]
if "inceptionv3" in model_arch:
transform_fn = video.VideoGroupValTransform(size=299, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
clip_input = transform_fn(clip_input)
clip_input = np.stack(clip_input, axis=0)
if "slowfast" in model_arch:
clip_input = clip_input.reshape((-1,) + (36, 3, 340, 450))
else:
clip_input = clip_input.reshape((-1,) + (32, 3, 340, 450))
clip_input = np.transpose(clip_input, (0, 2, 1, 3, 4))
else:
transform_fn = video.VideoGroupValTransform(size=224, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
clip_input = transform_fn(clip_input)
clip_input = np.stack(clip_input, axis=0)
if "slowfast" in model_arch:
clip_input = clip_input.reshape((-1,) + (36, 3, 224, 224))
else:
clip_input = clip_input.reshape((-1,) + (32, 3, 224, 224))
clip_input = np.transpose(clip_input, (0, 2, 1, 3, 4))
pred = net(nd.array(clip_input))
classes = net.classes
topK = 5
ind = nd.topk(pred, k=topK)[0].astype('int')
resList = []
for i in range(topK):
resList.append( [classes[ind[i].asscalar()], nd.softmax(pred)[0][ind[i]].asscalar()] )
resDF = pd.DataFrame(resList, columns=["class", "prob"])
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.util import testing as tm
class TestToCSV(object):
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': [u"AAAAA", u"ÄÄÄÄÄ", u"ßßßßß", u"聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
# the default to_csv encoding in Python 2 is ascii, and that in
# Python 3 is utf-8.
if pd.compat.PY2:
# the encoding argument should be set to utf-8
with tm.assert_raises_regex(UnicodeEncodeError, 'ascii'):
df.to_csv(path)
else:
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected = ',col\n0,1\n1,2\n'
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
assert df.to_csv() == expected_default
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# GH 11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# testing if NaN values are correctly represented in the index
# GH 11553
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0.0,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n_,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0,0,2\n0,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')})
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"usecols,expected",
[
# Column selection by index.
([0, 1], DataFrame(data=[[1000, 2000], [4000, 5000]], columns=["2", "0"])),
# Column selection by name.
(["0", "1"], DataFrame(data=[[2000, 3000], [5000, 6000]], columns=["0", "1"])),
],
)
def test_usecols_with_integer_like_header(all_parsers, usecols, expected):
parser = all_parsers
data = """2,0,1
1000,2000,3000
4000,5000,6000"""
result = parser.read_csv(StringIO(data), usecols=usecols)
tm.assert_frame_equal(result, expected)
# Globals #
import re
import numpy as np
import pandas as pd
import dateutil.parser as dp
from nltk import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import *
from itertools import islice
from scipy.stats import boxcox
from scipy.integrate import simps
from realtime_talib import Indicator
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from pprint import pprint
from selenium import webdriver
import time  # used by get_popularity for the sleep between page loads
RANDOM_STATE = 42
# Sentiment Preprocessing
def remove_special_chars(headline_list):
"""
Returns list of headlines with all non-alphabetical characters removed.
"""
rm_spec_chars = [re.sub('[^ A-Za-z]+', "", headline) for headline in headline_list]
return rm_spec_chars
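# Illustrative sketch (not in the original script): a quick check of the regex
# above on made-up headlines; digits and punctuation are stripped, spaces kept.
def _demo_remove_special_chars():
    sample = ["Bitcoin hits $10,000!", "Markets rally 5%"]
    return remove_special_chars(sample)  # -> ['Bitcoin hits ', 'Markets rally ']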
def tokenize(headline_list):
"""
Takes list of headlines as input and returns a list of lists of tokens.
"""
tokenized = []
for headline in headline_list:
tokens = word_tokenize(headline)
tokenized.append(tokens)
# print("tokenize")
# pprint(tokenized)
return tokenized
def remove_stop_words(tokenized_headline_list):
"""
Takes list of lists of tokens as input and removes all stop words.
"""
filtered_tokens = []
for token_list in tokenized_headline_list:
filtered_tokens.append([token for token in token_list if token not in set(stopwords.words('english'))])
# print("stop words")
# pprint(filtered_tokens)
return filtered_tokens
def stem(token_list_of_lists):
"""
Takes list of lists of tokens as input and stems every token.
Returns a list of lists of stems.
"""
stemmer = PorterStemmer()
stemmed = []
for token_list in token_list_of_lists:
# print(token_list)
stemmed.append([stemmer.stem(token) for token in token_list])
# print("stem")
# pprint(stemmed)
return stemmed
def make_bag_of_words(df, stemmed):
"""
Create bag of words model.
"""
print("\tCreating Bag of Words Model...")
very_pos = set()
slightly_pos = set()
neutral = set()
slightly_neg = set()
very_neg = set()
# Create sets that hold words in headlines categorized as "slightly_neg" or "slightly_pos" or etc
for stems, sentiment in zip(stemmed, df["Sentiment"].tolist()):
if sentiment == -2:
very_neg.update(stems)
elif sentiment == -1:
slightly_neg.update(stems)
elif sentiment == 0:
neutral.update(stems)
elif sentiment == 1:
slightly_pos.update(stems)
elif sentiment == 2:
very_pos.update(stems)
# Count number of words in each headline in each of the sets and encode it as a list of counts for each headline.
bag_count = []
for x in stemmed:
x = set(x)
bag_count.append(list((len(x & very_neg), len(x & slightly_neg), len(x & neutral), len(x & slightly_pos), len(x & very_pos))))
df["sentiment_class_count"] = bag_count
return df
def sentiment_preprocessing(df):
"""
Takes a dataframe, removes special characters, tokenizes
the headlines, removes stop-tokens, and stems the remaining tokens.
"""
specials_removed = remove_special_chars(df["Headline"].tolist())
tokenized = tokenize(specials_removed)
tokenized_filtered = remove_stop_words(tokenized)
stemmed = stem(tokenized_filtered)
return df, stemmed
def headlines_balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\nSplitting headlines into *balanced* training and test sets...")
# pprint(list(dataset.values))
# pprint(dataset)
# Use sklearn.train_test_split to split all features into x_train and x_test,
# and all expected values into y_train and y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Sentiment", "Headline"], axis=1).values,
dataset["Sentiment"].values, test_size=test_size,
random_state=RANDOM_STATE)
x_train = [x[0] for x in x_train]
x_test = [x[0] for x in x_test]
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
train["Sentiment"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=["very_neg", "slightly_neg", "neutral", "slightly_pos", "very_pos"])
test["Sentiment"] = pd.Series(y_test)
train_prediction = train["Sentiment"].values
test_prediction = test["Sentiment"].values
train_trimmed = train.drop(["Sentiment"], axis=1).values
test_trimmed = test.drop(["Sentiment"], axis=1).values
return train_trimmed, test_trimmed, train_prediction, test_prediction
def split(dataset, test_size, balanced=True):
if balanced:
return headlines_balanced_split(dataset, test_size)
else:
# TODO: write imbalanced split function
return None
# Helpers #
def sliding_window(seq, n=2):
"""
Returns a sliding window (of width n) over data from the iterable. https://stackoverflow.com/a/6822773/8740440
"""
"s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ..."
it = iter(seq)
result = tuple(islice(it, n))
if len(result) == n:
yield result
for elem in it:
result = result[1:] + (elem,)
yield result
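# Illustrative sketch (not in the original script): the generator yields every
# consecutive window of width n over a made-up sequence.
def _demo_sliding_window():
    return list(sliding_window([1, 2, 3, 4], n=2))  # -> [(1, 2), (2, 3), (3, 4)]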
def integrate(avg_daily_sentiment, interval):
"""
Takes a list of average daily sentiment scores and returns a list of definite integral estimations calculated
with Simpson's method. Each integral interval is determined by the `interval` variable. Shows accumulated sentiment.
"""
# Split into sliding window list of lists
sentiment_windows = sliding_window(avg_daily_sentiment, interval)
integral_simpson_est = []
# https://stackoverflow.com/a/13323861/8740440
for x in sentiment_windows:
# Estimate area using composite Simpson's rule. dx indicates the spacing of the data on the x-axis.
integral_simpson_est.append(simps(x, dx=1))
dead_values = list([None] * interval)
dead_values.extend(integral_simpson_est)
dead_values.reverse()
return dead_values
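# Hedged example (synthetic sentiment values): each 3-point window is integrated
# with Simpson's rule; the function above then pads the result with `interval`
# None placeholders and reverses the list.
def _demo_integrate():
    avg_daily_sentiment = [0.1, 0.4, 0.3, -0.2, 0.0, 0.5]
    return integrate(avg_daily_sentiment, interval=3)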
def random_undersampling(dataset):
"""
Randomly deletes rows from the majority class until its row count equals
that of the minority class.
"""
minority_set = dataset[dataset.Trend == -1.0]
majority_set = dataset[dataset.Trend == 1.0]
# print(dataset.Trend.value_counts())
# If minority set larger than majority set, swap
if len(minority_set) > len(majority_set):
minority_set, majority_set = majority_set, minority_set
# Downsample majority class
majority_downsampled = resample(majority_set,
replace=False, # sample without replacement
n_samples=len(minority_set), # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
return pd.concat([majority_downsampled, minority_set])
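# Hedged example with a tiny synthetic dataset: the four majority rows
# (Trend == 1.0) are downsampled to match the two minority rows.
def _demo_random_undersampling():
    toy = pd.DataFrame({"Trend": [1.0, 1.0, 1.0, 1.0, -1.0, -1.0],
                        "feat": [1, 2, 3, 4, 5, 6]})
    return random_undersampling(toy)["Trend"].value_counts()  # 2 rows per class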
def get_popularity(headlines):
# TODO: Randomize user-agents OR figure out how to handle popups
if "Tweets" not in headlines.columns:
counts = []
driver = webdriver.Chrome()
for index, row in headlines.iterrows():
try:
driver.get(row["URL"])
time.sleep(3)
twitter_containers = driver.find_elements_by_xpath("//li[@class='twitter']")
count = twitter_containers[0].find_elements_by_xpath("//span[@class='count']")
if count[0].text == "":
counts.append(1)
else:
counts.append(int(count[0].text))
except Exception:
counts.append(1) # QUESTION: Should it be None?
headlines["Tweets"] = (pd.Series(counts)).values
print(counts)
return headlines
def balanced_split(dataset, test_size):
"""
Randomly splits dataset into balanced training and test sets.
"""
print("\tSplitting data into *balanced* training and test sets")
# Use sklearn.train_test_split to split original dataset into x_train, y_train, x_test, y_test numpy arrays
x_train, x_test, y_train, y_test = train_test_split(dataset.drop(["Date", "Trend"], axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
# Combine x_train and y_train (numpy arrays) into a single dataframe, with column labels
train = pd.DataFrame(data=x_train, columns=dataset.columns[1:-1])
train["Trend"] = pd.Series(y_train)
# Do the same for x_test and y_test
test = pd.DataFrame(data=x_test, columns=dataset.columns[1:-1])
test["Trend"] = pd.Series(y_test)
# Apply random undersampling to both data frames
train_downsampled = random_undersampling(train)
test_downsampled = random_undersampling(test)
train_trend = train_downsampled["Trend"].values
test_trend = test_downsampled["Trend"].values
train_trimmed = train_downsampled.drop(["Trend"], axis=1).values
test_trimmed = test_downsampled.drop(["Trend"], axis=1).values
return train_trimmed, test_trimmed, train_trend, test_trend
def unbalanced_split(dataset, test_size):
"""
Randomly splits dataset into unbalanced training and test sets.
"""
print("\tSplitting data into *unbalanced* training and test sets")
dataset = dataset.drop("Date", axis=1)
output = train_test_split(dataset.drop("Trend", axis=1).values, dataset["Trend"].values, test_size=test_size, random_state=RANDOM_STATE)
return output
# Main #
def calculate_indicators(ohlcv):
"""
Extracts technical indicators from OHLCV data.
"""
print("\tCalculating technical indicators")
ohlcv = ohlcv.drop(["Volume (BTC)", "Weighted Price"], axis=1)
ohlcv.columns = ["Date", "Open", "High", "Low", "Close", "Volume"]
temp_ohlcv = ohlcv.copy()
# Converts ISO 8601 timestamps to UNIX
unix_times = [int((dp.parse(temp_ohlcv.iloc[index]["Date"])).strftime("%s")) for index in range(temp_ohlcv.shape[0])]
temp_ohlcv["Date"] = (pd.Series(unix_times)).values
# Converts column headers to lowercase and sorts rows in chronological order
temp_ohlcv.columns = ["date", "open", "high", "low", "close", "volume"]
temp_ohlcv = temp_ohlcv.iloc[::-1]
# Rate of Change Ratio
rocr3 = ((Indicator(temp_ohlcv, "ROCR", 3)).getHistorical())[::-1]
rocr6 = ((Indicator(temp_ohlcv, "ROCR", 6)).getHistorical())[::-1]
# Average True Range
atr = ((Indicator(temp_ohlcv, "ATR", 14)).getHistorical())[::-1]
# On-Balance Volume
obv = ((Indicator(temp_ohlcv, "OBV")).getHistorical())[::-1]
# Triple Exponential Moving Average
trix = ((Indicator(temp_ohlcv, "TRIX", 20)).getHistorical())[::-1]
# Momentum
mom1 = ((Indicator(temp_ohlcv, "MOM", 1)).getHistorical())[::-1]
mom3 = ((Indicator(temp_ohlcv, "MOM", 3)).getHistorical())[::-1]
# Average Directional Index
adx14 = ((Indicator(temp_ohlcv, "ADX", 14)).getHistorical())[::-1]
adx20 = ((Indicator(temp_ohlcv, "ADX", 20)).getHistorical())[::-1]
# Williams %R
willr = ((Indicator(temp_ohlcv, "WILLR", 14)).getHistorical())[::-1]
# Relative Strength Index
rsi6 = ((Indicator(temp_ohlcv, "RSI", 6)).getHistorical())[::-1]
rsi12 = ((Indicator(temp_ohlcv, "RSI", 12)).getHistorical())[::-1]
# Moving Average Convergence Divergence
macd, macd_signal, macd_hist = (Indicator(temp_ohlcv, "MACD", 12, 26, 9)).getHistorical()
macd, macd_signal, macd_hist = macd[::-1], macd_signal[::-1], macd_hist[::-1]
# Exponential Moving Average
ema6 = ((Indicator(temp_ohlcv, "MA", 6, 1)).getHistorical())[::-1]
ema12 = ((Indicator(temp_ohlcv, "MA", 12, 1)).getHistorical())[::-1]
# Append indicators to the input datasets
min_length = min(len(mom1), len(mom3), len(adx14), len(adx20), len(willr), len(rsi6), len(rsi12), len(macd), len(macd_signal), len(macd_hist), len(ema6), len(ema12), len(rocr3), len(rocr6), len(atr), len(obv), len(trix))
ohlcv = ohlcv[:min_length].drop(["Open", "High", "Low"], axis=1)
ohlcv["MOM (1)"], ohlcv["MOM (3)"], ohlcv["ADX (14)"] = (pd.Series(mom1[:min_length])).values, (pd.Series(mom3[:min_length])).values, (pd.Series(adx14[:min_length])).values
ohlcv["ADX (20)"], ohlcv["WILLR"], ohlcv["RSI (6)"] = (pd.Series(adx20[:min_length])).values, (pd.Series(willr[:min_length])).values, (pd.Series(rsi6[:min_length])).values
ohlcv["RSI (12)"], ohlcv["MACD"], ohlcv["MACD (Signal)"] = (pd.Series(rsi12[:min_length])).values, (pd.Series(macd[:min_length])).values, (pd.Series(macd_signal[:min_length])).values
ohlcv["MACD (Historical)"], ohlcv["EMA (6)"], ohlcv["EMA (12)"] = (pd.Series(macd_hist[:min_length])).values, (pd.Series(ema6[:min_length])).values, (pd.Series(ema12[:min_length])).values
ohlcv["ROCR (3)"], ohlcv["ROCR (6)"], ohlcv["ATR (14)"] = (pd.Series(rocr3[:min_length])).values, (pd.Series(rocr6[:min_length])).values, (pd.Series(atr[:min_length])).values
ohlcv["OBV"], ohlcv["TRIX (20)"] = (pd.Series(obv[:min_length])).values, (pd.Series(trix[:min_length])).values
return ohlcv
def calculate_sentiment(headlines):
sentiment_scores = {}
numer, denom = 0.0, 0.0
for index, currRow in headlines.iterrows():
print(currRow)
currDate = currRow["Date"]
if currDate in sentiment_scores:
pass
else:
numer = currRow["Sentiment"] * currRow["Tweets"]
denom = currRow["Tweets"]
for index, nextRow in headlines.iloc[index + 1:].iterrows():
if nextRow["Date"] == currDate:
numer += (nextRow["Sentiment"] * nextRow["Tweets"])
denom += nextRow["Tweets"]
else:
break
sentiment_scores[currDate] = numer / denom
numer, denom = 0.0, 0.0
sentiment_scores_df = pd.DataFrame(list(sentiment_scores.items()), columns=["Date", "Sentiment"])
return sentiment_scores_df
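# Hedged example (made-up headlines): sentiment is averaged per date, weighted
# by tweet counts, e.g. 2021-01-01 -> (1*10 + (-1)*30) / 40 = -0.5.
def _demo_calculate_sentiment():
    headlines = pd.DataFrame({
        "Date": ["2021-01-01", "2021-01-01", "2021-01-02"],
        "Sentiment": [1, -1, 2],
        "Tweets": [10, 30, 5],
    })
    return calculate_sentiment(headlines)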
def merge_datasets(origin, other_sets):
print("\tMerging datasets")
merged = origin
for set in other_sets:
merged = pd.merge(merged, set, on="Date")
return merged
def fix_null_vals(dataset):
"""Implements the Last Observation Carried Forward (LOCF) method to fill missing values."""
print("\tFixing null values")
if not dataset.isnull().any().any():
return dataset
else:
return dataset.fillna(method="ffill")
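# Hedged example: the single missing Close value is forward-filled (LOCF) with
# the previous observation.
def _demo_fix_null_vals():
    toy = pd.DataFrame({"Close": [10.0, np.nan, 12.0]})
    return fix_null_vals(toy)  # the NaN becomes 10.0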
def binarize_labels(dataset):
"""Transforms daily price data into binary values indicating price change."""
print("\tBinarizing price movements")
trends = [None]
for index in range(dataset.shape[0] - 1):
difference = dataset.iloc[index]["Close"] - dataset.iloc[index + 1]["Close"]
if difference < 0:
trends.append(-1)
else:
trends.append(1)
dataset["Trend"] = (pd.Series(trends)).values
dataset = dataset.drop(dataset.index[0])
return dataset
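# Hedged example: each row after the first is labelled -1 when its Close is
# above the previous Close and 1 otherwise, then the first row is dropped.
def _demo_binarize_labels():
    toy = pd.DataFrame({"Close": [100.0, 101.0, 99.0]})
    return binarize_labels(toy)["Trend"].tolist()  # -> [-1.0, 1.0]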
def add_lag_variables(dataset, lag=3):
print("\tAdding lag variables")
new_df_dict = {}
for col_header in dataset.drop(["Date", "Trend"], axis=1):
new_df_dict[col_header] = dataset[col_header]
for lag in range(1, lag + 1):
new_df_dict["%s_lag%d" % (col_header, lag)] = dataset[col_header].shift(-lag)
new_df = pd.DataFrame(new_df_dict, index=dataset.index)
new_df["Date"], new_df["Trend"] = dataset["Date"], dataset["Trend"]
return new_df.dropna()
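# Hedged example: each feature column gains <col>_lag1..lag3 copies shifted by
# -1..-3 rows; rows without a complete set of lags are dropped.
def _demo_add_lag_variables():
    toy = pd.DataFrame({"Date": pd.date_range("2021-01-01", periods=6),
                        "price": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
                        "Trend": [1, -1, 1, -1, 1, -1]})
    return add_lag_variables(toy, lag=3)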
def power_transform(dataset):
print("\tApplying a box-cox transform to selected features")
for header in dataset.drop(["Date", "Trend"], axis=1).columns:
if not (dataset[header] < 0).any() and not (dataset[header] == 0).any():
dataset[header] = boxcox(dataset[header])[0]
return dataset
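# Hedged example: only strictly positive columns are Box-Cox transformed, so
# "pos" changes while "mixed" (which contains a zero) is left untouched.
def _demo_power_transform():
    toy = pd.DataFrame({"Date": pd.date_range("2021-01-01", periods=3),
                        "pos": [1.0, 2.0, 3.0],
                        "mixed": [0.0, 1.0, 2.0],
                        "Trend": [1, -1, 1]})
    return power_transform(toy)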
def split(dataset, test_size, balanced=True):
# TODO: Splits can't be random, they need to respect the temporal order of each observation
if balanced:
return balanced_split(dataset, test_size)
else:
return unbalanced_split(dataset, test_size)
def integral_transform(dataset, interval):
integral = integrate(list(dataset["Sentiment"]), interval)
dataset["Sentiment_integrals"] =
|
pd.Series(integral)
|
pandas.Series
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 2018
@author: toshiki.ishikawa
"""
import os
import sys
import gc
import utils
import datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from multiprocessing import cpu_count, Pool
utils.start(__file__)
#==============================================================================
NTHREAD = cpu_count()
PREF = 'f102'
SUMMARY = 30
KEY = 'card_id'
stats = ['min', 'max', 'mean', 'median', 'std', 'skew']
# =============================================================================
#
# =============================================================================
PATH = os.path.join('..', 'data')
historical_transactions = pd.read_csv(os.path.join(PATH, 'historical_transactions.csv'))
historical_transactions['installments'].replace(-1, np.nan, inplace=True)
historical_transactions['installments'].replace(999, np.nan, inplace=True)
# historical_transactions['purchase_amount'] = np.log1p(historical_transactions['purchase_amount'] - historical_transactions['purchase_amount'].min())
historical_transactions['purchase_amount'] = np.round(historical_transactions['purchase_amount'] / 0.00150265118 + 497.06,2)
historical_transactions['purchase_date'] = pd.to_datetime(historical_transactions['purchase_date'])
from itertools import combinations
from math import exp, expm1, log1p, log10, log2, sqrt, ceil, floor, radians, sin, cos
from random import choice, sample, uniform
import time
#pyData stack
import numpy as np
import pandas as pd
from scipy import sparse
#sklearn preprocessing, model selection
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
#sklearn classifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.metrics import jaccard_similarity_score, accuracy_score
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.decomposition import TruncatedSVD, NMF, KernelPCA
import lightgbm as lgb
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers.merge import concatenate
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.utils.np_utils import to_categorical
def to_time(df, f_time='time'):
df[f_time] = pd.to_datetime(df[f_time], unit='s')
#numeric
#f_mday = 'inf_scl_{}_day'.format(f_time)
f_hour = 'inf_hour'
f_wday = 'inf_wday'
f_week = 'inf_week'
f_wdhr = 'inf_wdhr'
#f_week = 'inf_{}_week'.format(f_time)
#d, h, m, w = 31, 24, 60, 7
#df[f_mday] = df[f_time].dt.day# /d
df[f_hour] = df[f_time].dt.hour# /h
df[f_wday] = df[f_time].dt.dayofweek# /w
df[f_week] = df[f_time].dt.week
df[f_wdhr] = df[f_wday] * 24 + df[f_hour]
df[f_wdhr] = df[f_wdhr].apply(str)
#print(df.describe())
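# Hedged sketch (not in the original script): apply to_time to a tiny frame of
# epoch-second timestamps and inspect the derived hour/weekday/week features.
def _demo_to_time():
    toy = pd.DataFrame({"time": [1546300800, 1546387200]})  # 2019-01-01, 2019-01-02
    to_time(toy)  # mutates `toy` in place
    return toy[["inf_hour", "inf_wday", "inf_week", "inf_wdhr"]]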
#string
def titles_agg(train_data, test_data, hist, stem='tmp', last_only=False):
print('{}:\t{} records'.format(stem, hist.shape[0]), flush=True)
col = 'list_ttl_{}'.format(stem)
#list and count
if last_only:
col = 'list_ttl_{}_last_only'.format(stem)
tmp = hist.groupby('user_id')['title_id'].agg(' '.join).apply(lambda x: x.split()[-1])
else:
col = 'list_ttl_{}'.format(stem)
tmp = hist.groupby('user_id')['title_id'].agg(' '.join)#.apply(lambda x: x.split())
tmp = tmp.rename(col).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
train_data = train_data.merge(tmp, how='left', on='user_id')
test_data = test_data.merge(tmp, how='left', on='user_id')
train_data = train_data.fillna('')
test_data = test_data.fillna('')
if last_only:
del tmp
col = 'f_time_lastest_{}_last_only'.format(stem)
tmp = hist.groupby('user_id')['watch_time'].agg(lambda x: ' '.join(map(str, x))).apply(lambda x: x.split()[-1])
tmp = tmp.rename(col).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
train_data = train_data.merge(tmp, how='left', on='user_id')
test_data = test_data.merge(tmp, how='left', on='user_id')
else:
train_data['f_cnt_{}'.format(stem)] = train_data[col].apply(lambda x: len(x.split()))
test_data['f_cnt_{}'.format(stem)] = test_data[col].apply(lambda x: len(x.split()))
del tmp
return train_data, test_data
#int
def sum_watch_time(train_data, test_data, hist, stem='tmp'):
#sum time
tmp = hist.groupby('user_id')['watch_time'].sum()
tmp = tmp.rename('f_time_sum_{}'.format(stem)).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
#merge
train_data = train_data.merge(tmp, how='left', on='user_id')
test_data = test_data.merge(tmp, how='left', on='user_id')
del tmp
#var time
tmp = hist.groupby('user_id')['watch_time'].var()
tmp = tmp.rename('f_time_var_{}'.format(stem)).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
#merge
train_data = train_data.merge(tmp, how='left', on='user_id')
test_data = test_data.merge(tmp, how='left', on='user_id')
del tmp
#median time
tmp = hist.groupby('user_id')['watch_time'].median()
tmp = tmp.rename('f_time_median_{}'.format(stem)).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
#merge
train_data = train_data.merge(tmp, how='left', on='user_id')
test_data = test_data.merge(tmp, how='left', on='user_id')
del tmp
train_data = train_data.fillna(0)
test_data = test_data.fillna(0)
#print(train_data)
return train_data, test_data
#string
def trigger_time(train_data, test_data, hist, stem='tmp'):
tmp = hist.groupby('user_id')['inf_wdhr'].agg(' '.join)#.apply(lambda x: x.split())
tmp = tmp.rename('list_trg_{}'.format(stem)).to_frame()
tmp['user_id'] = tmp.index
tmp = tmp.reset_index(drop=True)
#merge
train_data = train_data.merge(tmp, how='left', on='user_id')
train_data = train_data.fillna('')
train_data['f_cnt_{}'.format(stem)] = train_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))
test_data = test_data.merge(tmp, how='left', on='user_id')
test_data = test_data.fillna('')
test_data['f_cnt_{}'.format(stem)] = test_data['list_trg_{}'.format(stem)].apply(lambda x: len(x.split()))
del tmp
return train_data, test_data
#evaluation
def display_val_score(y, p, r):
v = np.argmax(p, axis=1)
jcc = jaccard_similarity_score(y, v)
acc = accuracy_score(y, v)
print('\nVal: jcc={:.6f}, acc={:.6f}'.format(jcc, acc), flush=True)
print('Adjusted Val: jcc={:.6f}, acc={:.6f}'.format(jcc * ratio, acc * ratio), flush=True)
return jcc
#
def write_csv(test_id, labels, t='t', stem='', score=0):
print("\nWriting output...\n")
sub = pd.DataFrame()
sub['user_id'] = test_id
sub['title_id'] = labels
print(sub['title_id'].value_counts())
sub.to_csv("preds_{}_{}_s{:.6f}.csv".format(stem, t, jcc * ratio), index=False)
#read
input_folder = '../input/'
####train
train_events = pd.read_csv(input_folder + 'events_train.csv', dtype={'user_id': np.str, 'title_id': np.str})
train_users = pd.read_csv(input_folder + 'labels_train.csv', dtype={'user_id': np.str, 'title_id': np.str})
# Copyright (C) 2020 <NAME> / <EMAIL>, <NAME> / <EMAIL>
#
# This is logAnalyzer
#
# logAnalyzer is free software: you can redistribute it and/or modify
# it under the terms of the 3-clause BSD License.
#
# logAnalyzer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind whatsoever.
#
import textfsm
import pandas as pd
import csv
import xlsxwriter
import glob
import argparse
import yaml
from sys import platform as _platform
def readTemplate(fileTemplate):
# Read the CSV list of textFSM templates and return the opened template files,
# the list of parsed variable names per template, and the template names.
with open(fileTemplate,'r') as fTemplate:
reader = csv.reader(fTemplate)
templates = list(reader)
cantTemplate = len(templates)
template = []
var = []
index = []
for t in range(cantTemplate):
template.append(open('Templates/'+templates[t][0]))
print(template[t])
var.append(template[t].readlines())
r1 = len(var[t])
var1 = []
index.append([])
for i1 in range(r1):
h1 = var[t][i1].find('Value')
if h1 != -1:
var1 = var[t][i1].split(' ')
index[t].append(var1[-2])
print('#####Successfully Loaded Templates#####')
return template, index, templates
def makeParsed(nomTemplate, routerLog): #Parse through textFSM (reading the file again)
template = open('Templates/'+nomTemplate)
results_template = textfsm.TextFSM(template)
parsed_results = results_template.ParseText (routerLog)
return parsed_results
def readLog(logFolder): #Reads CSV, and stores router logs in memory for processing
if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
# linux
listContent = [f for f in glob.glob(logFolder + '*rx.txt')]
routers = [[f.split("/")[1]] for f in listContent]
elif _platform == "win64" or _platform == "win32":
# Windows 64-bit
listContent = [f for f in glob.glob(logFolder + '*rx.txt')]
routers = [[f.split("\\")[1]] for f in listContent]
content = []
for f in listContent:
fopen = open(f,'r')
content.append(fopen.read())
fopen.close()
print('#########Logs Loaded Successfully#########')
return content, routers
def verifyMajorFile(majorFile):
"""We verify the majorFile.yml before moving on.
Returns:
[dict]: [Dictionary with words of major information, for templates if have any words additional to down]
"""
try:
with open(majorFile,'r') as f:
majorMatrix = yaml.load(f, Loader=yaml.FullLoader)
except Exception:
print("Missing " + majorFile + " file. Quitting..")
quit()
return majorMatrix
def parseResults(read_template, index, content, templates, routers): #Build the Dataframe from textFSM filter, index and router log
datosEquipo = {}
cantTemplate = len(templates)
cantRouters = len(content)
for i in range(cantTemplate):
nomTemplate = templates[i][0]
columnss = index[i]
dfTemp = pd.DataFrame(columns=columnss)
for i1 in range(cantRouters):
print(routers[i1][0] , nomTemplate)
routerLog = content[i1]
parsed_results = makeParsed(nomTemplate, routerLog)
if len(parsed_results) == 0:
# if the parse is empty, we save the name of the routers
parsed_results = [routers[i1][0]]
for empty in range(len(columnss)-1):
parsed_results.append('NOT VALUE')
parsed_results=[parsed_results]
dfResult = pd.DataFrame(parsed_results, columns= columnss)
else:
dfResult = pd.DataFrame(parsed_results, columns= columnss)
dfTemp = pd.concat([dfTemp, dfResult])
# Store the DataFrame with the data of all routers in the datosEquipo
# dictionary under the key nomTemplate.
datosEquipo[nomTemplate] = dfTemp
# I added this here because it was already done in main ().
# It is cleaner like this ...
datosEquipo[nomTemplate].reset_index(level=0, inplace=True)
datosEquipo[nomTemplate] = datosEquipo[nomTemplate].drop(columns='index')
return datosEquipo
def searchDiff(datosEquipoPre, datosEquipoPost):#Builds a new table with the rows that differ between the two tables (post vs pre)
countDif = {}
for key in datosEquipoPre.keys():
dfUnion = pd.merge(datosEquipoPre[key], datosEquipoPost[key], how='outer', indicator='Where').drop_duplicates()
dfInter = dfUnion[dfUnion.Where=='both']
dfCompl = dfUnion[~(dfUnion.isin(dfInter))].dropna(axis=0, how='all').drop_duplicates()
dfCompl['Where'] = dfCompl['Where'].str.replace('left_only','Pre')
dfCompl['Where'] = dfCompl['Where'].str.replace('right_only','Post')
countDif[key] = dfCompl.sort_values(by=['NAME'])
return countDif
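# Hedged example (synthetic logs, single template key 'tmpl'): rows present only
# in the post data are tagged 'Post', rows only in the pre data are tagged 'Pre'.
# A 'NAME' column is required because the function sorts on it.
def _demo_searchDiff():
    pre = {'tmpl': pd.DataFrame({'NAME': ['if-1', 'if-2'], 'STATE': ['up', 'up']})}
    post = {'tmpl': pd.DataFrame({'NAME': ['if-1', 'if-2'], 'STATE': ['up', 'down']})}
    return searchDiff(pre, post)['tmpl']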
def findMajor(count_dif):#Builds a table of major errors: rows that appear only in the post data and match the per-template keywords defined in the YAML file (or 'down' when no keywords are defined for that template)
countDown = {}
majorWords = verifyMajorFile('Templates/majorFile.yml')
for key in count_dif.keys():
if key in majorWords:
majorWords[key].append('down')
df = pd.DataFrame()
for j in majorWords[key]:
df1 = count_dif[key][count_dif[key]['Where']=='Post']
if len(df1) > 0:
df1 = df1[df1.apply(lambda r: r.str.contains(j, case=False).any(), axis=1)]
else:
df1 = pd.DataFrame(columns=count_dif[key].columns)
df=pd.concat([df, df1])
countDown[key] = df
else:
df = count_dif[key][count_dif[key]['Where']=='Post']
if len(df) > 0:
df = df[df.apply(lambda r: r.str.contains('down', case=False).any(), axis=1)]
else:
df = pd.DataFrame(columns=count_dif[key].columns)
countDown[key] = df
return countDown
def makeTable(datosEquipoPre, datosEquipoPost):#Sort the table pre and post to present in Excel
df_all = {}
datosEquipoPre1 = datosEquipoPre.copy()
for temp in datosEquipoPre.keys():
datosEquipoPre1[temp]['##']='##'
df_all[temp] = pd.concat([datosEquipoPre1[temp], datosEquipoPost[temp]], axis=1, keys=('Pre-Check', 'Post-Check'))
return df_all
def constructExcel(df_final, count_dif, searchMajor, folderLog):#Sort the data and format creating the Excel
fileName = folderLog[:-1] + ".xlsx"
writer = pd.ExcelWriter(fileName, engine='xlsxwriter')
"""
Preprocess OA lookup table
"""
import os
import csv
import configparser
import pandas as pd
import geopandas as gpd
import math
import random
from shapely.geometry import mapping, MultiPolygon
from tqdm import tqdm
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
random.seed(43)
def process_shapes(path_output, path_ew, path_scot, lookup):
"""
Process all shape boundaries for ~8,000 areas.
"""
folder = os.path.join(BASE_PATH, 'intermediate')
if not os.path.exists(os.path.join(folder, 'output_areas.csv')):
data_ew = gpd.read_file(path_ew, crs='epsg:27700')#[:10]
data_ew = data_ew[['msoa11cd', 'geometry']]
data_ew.columns = ['msoa', 'geometry']
data_scot = gpd.read_file(path_scot, crs='epsg:27700')#[:200]
data_scot = data_scot[['InterZone', 'geometry']]
data_scot.columns = ['msoa', 'geometry']
all_data = data_ew.append(data_scot, ignore_index=True)
all_data['geometry'] = all_data.apply(remove_small_shapes, axis=1)
all_data['geometry'] = all_data.simplify(
tolerance = 10,
preserve_topology=True).buffer(0.0001).simplify(
tolerance = 10,
preserve_topology=True
)
all_data['area_km2'] = all_data['geometry'].area / 1e6
lookup = pd.read_csv(lookup)
lookup = lookup[['MSOA11CD', 'RGN11NM']]
lookup = lookup.drop_duplicates()
lookup.columns = ['msoa', 'region']
all_data = (pd.merge(all_data, lookup, on='msoa'))
all_data.to_file(path_output, crs='epsg:27700')
all_data = all_data[['msoa', 'area_km2', 'region']]
out_path = os.path.join(folder, 'output_areas.csv')
all_data.to_csv(out_path, index=False)
else:
all_data = pd.read_csv(os.path.join(folder, 'output_areas.csv'))
return all_data
def remove_small_shapes(x):
"""
Get rid of small geometries.
"""
# if its a single polygon, just return the polygon geometry
if x.geometry.geom_type == 'Polygon':
return x.geometry
# if its a multipolygon, we start trying to simplify
# and remove shapes if its too big.
elif x.geometry.geom_type == 'MultiPolygon':
area1 = 1e7
area2 = 5e7
# don't remove shapes if total area is already very small
if x.geometry.area < area1:
return x.geometry
if x.geometry.area > area2:
threshold = 5e6
else:
threshold = 5e6
# save remaining polygons as new multipolygon for
# the specific country
new_geom = []
for y in x.geometry:
if y.area > threshold:
new_geom.append(y)
return MultiPolygon(new_geom)
def process_area_features(path_output, data):
"""
Load shapes and extract required urban/rural information.
"""
data = data.to_dict('records')
output = {}
for item in data:
output[item['msoa']] = {
'area_km2': item['area_km2'],
'region': item['region'],
}
return output
def get_lads(path):
"""
Get all unique Local Authority District IDs.
"""
path_output = os.path.join(BASE_PATH, 'intermediate', 'prems_by_lad_msoa')
if not os.path.exists(path_output):
os.makedirs(path_output)
all_data = pd.read_csv(path)
all_data = all_data.to_dict('records')
unique_lads = set()
for item in all_data:
unique_lads.add(item['LAD17CD'])
unique_lads = list(unique_lads)#[:10]
for lad in list(unique_lads):
path_lad = os.path.join(path_output, lad)
if not os.path.exists(path_lad):
os.makedirs(path_lad)
lookup = []
for item in all_data:
if lad == item['LAD17CD']:
lookup.append({
'OA11CD': item['OA11CD'],
'LSOA11CD': item['LSOA11CD'],
'MSOA11CD': item['MSOA11CD']
})
lookup = pd.DataFrame(lookup)
lookup.to_csv(os.path.join(path_lad, 'lookup.csv'), index=False)
return list(unique_lads)
def get_lookup(lad):
"""
Create a lookup table mapping each Middle Super Output Area (~8,000 in total)
to its lower-level Output Areas (~190,000 in total).
"""
folder = os.path.join(BASE_PATH, 'intermediate', 'prems_by_lad_msoa', lad)
path = os.path.join(folder, 'lookup.csv')
all_data = pd.read_csv(path)
unique_msoas = all_data['MSOA11CD'].unique()
all_data = all_data.to_dict('records')
lookup = {}
for msoa in unique_msoas:
oa_ids = []
for item in all_data:
if msoa == item['MSOA11CD']:
oa_ids.append(item['OA11CD'])
lookup[msoa] = oa_ids
return unique_msoas, lookup
def write_premises_data(lad):
"""
Aggregate Output Area premises data into Middle Super Output Areas and write.
"""
path_lad = os.path.join(BASE_PATH, 'prems_by_lad', lad)
unique_msoas, lookup = get_lookup(lad)
directory = os.path.join(BASE_PATH, 'intermediate', 'prems_by_lad_msoa', lad)
for msoa in unique_msoas:
path_output = os.path.join(directory, msoa + '.csv')
if os.path.exists(path_output):
continue
oas = lookup[msoa]
prems_by_msoa = []
for oa in oas:
path_oa = os.path.join(path_lad, oa + '.csv')
if not os.path.exists(path_oa):
continue
prems = pd.read_csv(path_oa)
prems = prems.to_dict('records')
for prem in prems:
prems_by_msoa.append({
'mistral_function_class': prem['mistral_function_class'],
'mistral_building_class': prem['mistral_building_class'],
'res_count': prem['res_count'],
'floor_area': prem['floor_area'],
'height_toroofbase': prem['height_toroofbase'],
'height_torooftop': prem['height_torooftop'],
'nonres_count': prem['nonres_count'],
'number_of_floors': prem['number_of_floors'],
'footprint_area': prem['footprint_area'],
'geometry': prem['geom'],
})
prems_by_msoa = pd.DataFrame(prems_by_msoa)
prems_by_msoa.to_csv(path_output, index=False)
def write_hh_data(lad):
"""
Get the estimated household demographics for each area.
"""
filename = 'ass_{}_area11_2018.csv'.format(lad)
path = os.path.join(BASE_PATH, 'hh_demographics_msoa_2018', filename)
if not os.path.exists(path):
return
unique_msoas, lookup = get_lookup(lad)
directory = os.path.join(BASE_PATH, 'intermediate', 'hh_by_lad_msoa', lad)
if not os.path.exists(directory):
os.makedirs(directory)
hh_data = pd.read_csv(path)
for msoa in unique_msoas:
path_output = os.path.join(directory, msoa + '.csv')
if os.path.exists(path_output):
continue
hh_msoa_data = hh_data.loc[hh_data['Area'] == msoa]
hh_msoa_data.to_csv(path_output, index=False)
def generate_msoa_lookup(unique_lads, area_features):
"""
Load in all data for each area to generate a single lookup table.
"""
output = []
for lad in unique_lads:#[:1]:
hh_folder = os.path.join(BASE_PATH, 'intermediate', 'hh_by_lad_msoa', lad)
prems_folder = os.path.join(BASE_PATH, 'intermediate', 'prems_by_lad_msoa', lad)
unique_msoas, lookup = get_lookup(lad)
for msoa in unique_msoas:
results = get_area_stats(msoa, lad, hh_folder, prems_folder, area_features)
if not results == 'path does not exist':
output.append(results)
return output
def get_area_stats(msoa, lad, hh_folder, prems_folder, area_features):
"""
Get the area statistics for a single area.
"""
path = os.path.join(hh_folder, msoa + '.csv')
if not os.path.exists(path):
return 'path does not exist'
hh_data = pd.read_csv(path)
hh_data = hh_data.to_dict('records')
households = set()
population = set()
for row in hh_data:
households.add(row['HID'])
population.add(row['PID'])
path = os.path.join(prems_folder, msoa + '.csv')
if not os.path.exists(path):
return 'path does not exist'
try:
prems_data = pd.read_csv(path)
import numpy as np
import pandas as pd
import pytest
import scipy.stats
from pyextremes import EVA, get_model
@pytest.fixture(scope="function")
def eva_model(battery_wl_preprocessed) -> EVA:
return EVA(data=battery_wl_preprocessed)
@pytest.fixture(scope="function")
def eva_model_bm(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("MLE")
return eva_model
@pytest.fixture(scope="function")
def eva_model_bm_emcee(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="BM",
extremes_type="high",
block_size="1Y",
errors="raise",
)
eva_model.fit_model("Emcee", n_walkers=10, n_samples=100)
return eva_model
@pytest.fixture(scope="function")
def eva_model_pot_mle(battery_wl_preprocessed) -> EVA:
eva_model = EVA(data=battery_wl_preprocessed)
eva_model.get_extremes(
method="POT",
extremes_type="high",
threshold=1.35,
r="24H",
)
eva_model.fit_model("MLE")
return eva_model
class TestEVA:
def test_init_errors(self):
with pytest.raises(
TypeError, match=r"invalid type.*'data' argument.*pandas.Series"
):
EVA(data=1)
with pytest.warns(RuntimeWarning, match=r"'data'.*not numeric.*converted"):
eva_model = EVA(
data=pd.Series(
data=["1", "2", "3"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
assert np.allclose(eva_model.data.values, [1, 2, 3])
with pytest.raises(TypeError, match=r"invalid dtype.*'data' argument.*numeric"):
EVA(
data=pd.Series(
data=["a", "b", "c"],
index=pd.DatetimeIndex(["2020", "2021", "2022"]),
)
)
with pytest.raises(TypeError, match=r"index of 'data'.*date-time.*not"):
EVA(data=pd.Series(data=[1, 2, 3], index=["2020", "2021", "2022"]))
with pytest.warns(RuntimeWarning, match=r"index is not sorted.*sorting"):
eva_model = EVA(
data=pd.Series(
data=[1, 2, 3],
index=pd.DatetimeIndex(["2022", "2021", "2020"]),
)
)
assert np.allclose(eva_model.data.index.year.values, [2020, 2021, 2022])
with pytest.warns(RuntimeWarning, match=r"Null values found.*removing invalid"):
eva_model = EVA(
data=pd.Series(
data=[1, 2, np.nan, 3],
index=pd.DatetimeIndex(["2020", "2021", "2022", "2023"]),
)
)
assert np.allclose(eva_model.data.values, [1, 2, 3])
assert np.allclose(eva_model.data.index.year.values, [2020, 2021, 2023])
def test_init_attributes(self, eva_model):
# Ensure that 'data' attribute is properly processed
assert isinstance(eva_model.data, pd.Series)
assert np.issubdtype(eva_model.data.dtype, np.number)
assert eva_model.data.index.is_all_dates
assert eva_model.data.index.is_monotonic_increasing
assert eva_model.data.isna().sum() == 0
# Ensure model attributes exist and are all None
for param in [
"extremes",
"extremes_method",
"extremes_type",
"extremes_kwargs",
"extremes_transformer",
]:
with pytest.raises(AttributeError, match=r"extreme values must first"):
getattr(eva_model, param)
with pytest.raises(AttributeError, match=r"model must first"):
eva_model.model
@pytest.mark.parametrize(
"input_params",
[
{
"method": "BM",
"extremes_type": "high",
"block_size": "1Y",
"errors": "raise",
},
{
"method": "BM",
"extremes_type": "high",
},
{
"method": "POT",
"extremes_type": "high",
"threshold": 1.35,
"r": "24H",
},
{
"method": "POT",
"extremes_type": "high",
"threshold": 1.35,
},
],
)
def test_get_extremes(self, eva_model, input_params):
# Get extremes
eva_model.get_extremes(**input_params)
# Test attributes
assert eva_model.extremes_method == input_params["method"]
assert eva_model.extremes_type == input_params["extremes_type"]
if input_params["method"] == "BM":
assert len(eva_model.extremes_kwargs) == 2
assert eva_model.extremes_kwargs["block_size"] ==
|
pd.to_timedelta("1Y")
|
pandas.to_timedelta
|
#! /usr/bin/env python
##! /usr/bin/arch -x86_64 /usr/bin/env python
from logging import error
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import datetime as dt
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output, State
from pprint import pprint
import waitress
import json
import re
import argparse
import os
import zlib
import math
import textwrap
from ordered_set import OrderedSet
import natsort
from zipfile import ZipFile
from bs4 import BeautifulSoup # you also need to install "lxml" for the XML parser
from tabulate import tabulate
from collections import OrderedDict
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
print("############################################")
print("############################################")
print("############################################")
print("############################################")
debug=False
def DebugMsg(msg1,msg2=None,printmsg=True):
if debug and printmsg:
print(dt.datetime.now().strftime("%c"),end=" " )
print(msg1,end=" " )
if msg2 is not None:
print(msg2)
print("")
def DebugMsg2(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def DebugMsg3(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def Info(msg1,msg2=None,printmsg=True):
DebugMsg(msg1,msg2,printmsg)
def get_xlsx_sheet_names(xlsx_file,return_As_dropdown_options=False):
with ZipFile(xlsx_file) as zipped_file:
summary = zipped_file.open(r'xl/workbook.xml').read()
soup = BeautifulSoup(summary, "html.parser")
sheets = [sheet.get("name") for sheet in soup.find_all("sheet")]
if return_As_dropdown_options:
doptions=[]
for sheet in sheets:
doptions.append({"label": sheet, "value": sheet})
return doptions
else:
return sheets
# assume you have a "long-form" data frame
# see https://plotly.com/python/px-arguments/ for more options
class Dashboard:
def __init__(self, datafile,isxlsx=False,sheetname=None,skiprows=0,replace_with_nan=None, DashboardMode=False):
self.RecentFilesListPath="./recent"
self.DashboardMode=DashboardMode
self.ComparisonFunctionalityPlaceholder()
df_index=self.default_df_index
# self.setDataFile(datafile,isxlsx,sheetname,skiprows,replace_with_nan,df_index)
self.createDashboard(df_index,self.DashboardMode)
self.app = dash.Dash(external_scripts=["./dashboard.css"])
# self.app = dash.Dash()
self.app.layout = html.Div(self.layout())
def reset_df_index(self,idx):
self.df[idx]=None
self.filtered_df[idx]=None
self.plot_df[idx]=None
self.DataFile[idx]=None
def ComparisonFunctionalityPlaceholder(self):
self.df_indexes=["1","2"]
self.current_df_index="1"
self.default_df_index="1"
self.df=dict()
self.filtered_df=dict()
self.plot_df=dict()
self.DataFile=dict()
for idx in self.df_indexes:
self.df[idx]=None
self.filtered_df[idx]=None
self.plot_df[idx]=None
self.DataFile[idx]=None
def createDashboard(self, df_index, DashboardMode=False):
self.init_constants()
# self.df=None
self.reset=False
self.newXAxisColName = "#"
self.DatatoDownload = None
self.ControlMode=not DashboardMode
self.GlobalParams={}
self.GlobalParams['available_legends']=OrderedSet()
self.GlobalParams['SecAxisTitles']=OrderedSet()
self.GlobalParams['PrimaryAxisTitles']=OrderedSet()
self.GlobalParams['LegendTitle']="Legend"
self.GlobalParams['Datatable_columns']=[]
self.GlobalParams['columns_updated']=False
self.GlobalParams['PreAggregatedData']=True
tmp=None
if self.DataFile[df_index] is not None :
tmp=self.loadMetadata(df_index,"LastGraph")
if tmp is not None:
self.GraphParams = tmp
self.update_aggregate()
else:
self.initialize_GraphParams()
self.update_aggregate()
self.groups = [[json.dumps(self.GraphParams)]]
self.DF_read_copy = dict()
self.readFileInitDash(df_index)
self.updateGraphList(df_index)
self.filtered_df[df_index] = self.df[df_index].copy()
self.plot_df[df_index]=self.filtered_df[df_index]
self.table_df=self.filtered_df[df_index]
self.initialize_figs()
#self.update_graph()
def setDataFile(self,datafile,isxlsx,sheetname,skiprows,replace_with_nan,df_index):
if datafile is not None:
datafile1=os.path.abspath(datafile)
self.DataFile[df_index] = {'Path': datafile1,
'isXlsx':isxlsx,
'Sheet': sheetname,
'SkipRows': skiprows,
'ReplaceWithNan' : replace_with_nan,
'LastModified' : 0 ,
'MetadataFile' : datafile + ".dashjsondata" ,
}
self.update_df(self.DataFile[df_index],df_index)
self.updateRecentFiles(df_index)
else:
self.DataFile[df_index]=None
self.reset_df_index(df_index)
self.updateRecentFiles(df_index)
def initialize_GraphParams(self):
self.GraphParams["GraphId"] = ""
self.GraphParams["Name"] = ""
self.GraphParams["Xaxis"] = []
self.GraphParams["GraphType"] = "Scatter"
self.GraphParams["Primary_Yaxis"] = []
self.GraphParams["Primary_Legends"] = []
self.GraphParams["Aggregate_Func"] = []
self.GraphParams["Secondary_Legends"] = []
self.GraphParams["Aggregate"] = []
self.GraphParams["Scatter_Labels"] = []
self.GraphParams["SortBy"] = []
self.GraphParams["Filters"] = ""
self.GraphParams["FilterAgregatedData"] = ""
self.GraphParams["SortAgregatedData"] = ""
self.GraphParams["PreviousOperations"] = []
self.GraphParams["ShowPreAggregatedData"] = []
def loadMetadata(self,df_index,header=None):
jsondata=None
if self.DataFile[df_index] is not None and os.path.exists(self.DataFile[df_index]['MetadataFile']):
with open(self.DataFile[df_index]['MetadataFile']) as json_file:
jsondata=json.load(json_file)
if jsondata is not None and header is not None:
if header in jsondata:
jsondata=jsondata[header]
else:
jsondata=None
return jsondata
def updateMetadata(self,header,data,df_index):
jsondata=self.loadMetadata(df_index)
if jsondata is None:
jsondata=dict()
jsondata[header]=data
with open(self.DataFile[df_index]['MetadataFile'], "w") as outfile:
json.dump(jsondata,outfile)
def updateGraphList(self,df_index):
if self.DataFile[df_index] is not None:
self.SavedGraphList= self.getGraphList(df_index,'SavedGraphs')
self.HistoricalGraphList= self.getGraphList(df_index,'HistoricalGraphs')
else:
self.SavedGraphList= dict()
self.HistoricalGraphList= dict()
def getGraphList(self,df_index,type):
# type can be SavedGraphs/HistoricalGraphs
x=self.loadMetadata(df_index,type)
if x is None:
return dict()
else:
return x
def set_Graphid(self):
x=self.GraphParams.copy()
x['GraphId']=""
x['Name']=""
self.GraphParams['GraphId']=zlib.adler32(bytes(json.dumps(x),'UTF-8'))
return self.GraphParams['GraphId']
def update_dtypes(self,df1):
for col in self.dtypes:
if col in df1.columns:
if self.dtypes[col] == 'datetime':
df1[col]=pd.to_datetime(df1[col])
else:
df1[col]=df1[col].astype(self.dtypes[col])
return df1
def get_dypes(self,cols):
if cols is None:
dtypes=self.df[self.default_df_index].dtypes.to_frame('dtypes')['dtypes'].astype(str).to_dict()
else:
dtypes=self.df[self.default_df_index][cols].dtypes.to_frame('dtypes')['dtypes'].astype(str).to_dict()
return dtypes
def update_dtype(self,cols,dtype,custom_datetime_fmt):
update_done=False
for col in cols:
for idx in self.df_indexes:
if self.df[idx] is not None:
if dtype == 'datetime_custom_format':
self.df[idx][col]=pd.to_datetime(self.df[idx][col],format=custom_datetime_fmt,errors='coerce')
else:
self.df[idx][col]=self.df[idx][col].astype(self.AvailableDataTypes[dtype])
update_done=True
if update_done:
dtypes=self.df[self.default_df_index].dtypes.to_frame('dtypes').reset_index().set_index('index')['dtypes'].astype(str).to_dict()
self.updateMetadata("ColumnsDataTypes",dtypes,self.default_df_index)
def init_constants(self):
self.dtypes= {
'MasterJobId' : str ,
'jobid' : str ,
'jobindex' : float ,
'token' : str ,
'cluster' : str ,
'mem_bucketed' : float ,
'step' : str ,
'submit_time' : 'datetime' ,
'mem_before_bucket' : str ,
'lineno' : float ,
'mem_selection_reason' : str ,
'status' : str ,
'completion_time' : 'datetime' ,
'EosFlowVersion' : str ,
'PegasusVersion' : str ,
'Sandboxpath' : str ,
'RepeatabilityMode' : bool ,
'MasterStartTime' : 'datetime' ,
'LastRecordedTime' : 'datetime' ,
'status_bjobs' : str ,
'start_time' : 'datetime',
'CR_ProjectID' : str ,
'CR_TaskID' : str ,
'CR_JobId' : str ,
'CPU_Architecture' : str ,
'Grafana_Tag' : str ,
'Project_Task_Tag' : str ,
'CRRunningStartTime' : 'datetime',
'new_status' : str ,
'PATH' : str ,
'CORNERVT' : str ,
'PACKAGE' : str ,
'INSTANCE' : str ,
'ARC' : str ,
'VT' : str ,
'CORNER' : str ,
'EXTRACTION' : str ,
'SIM_CFG' : str ,
'TOKEN_esti' : str ,
'MEM_REQ_SIZE_esti' : float,
'MAX_MEM_esti' : float,
'PATH_esti' : str ,
'delta_SIM_vs_esti' : float,
'%age_SIM_vs_ESTI' : float,
'eosFlow' : str ,
'userid' : str ,
'AetherShutdown' : bool,
'DatabaseLocked' : bool,
'MasterStatus' : str ,
'MEM_REQ_TYPE' : str ,
'MEM_REQ_SIZE' : float,
'CPU_CNT' : float,
'CPU_TIME' : float,
'MEM_USAGE' : float,
'HOST_ID' : str ,
'SUBMIT_TIME' : 'datetime',
'START_TIME' : 'datetime',
'END_TIME' : 'datetime',
'RESUBMIT_COUNT' : float ,
'MAX_CPU' : float ,
'MAX_MEM' : float,
'EXIT_INFO' : float,
'STATUS' : str ,
'RunTime' : float,
'TurnAroundTime' : float,
'RunTimeBin(Hrs)' : str
}
self.GraphParams = dict()
self.operators = [
["ge ", ">="],
["le ", "<="],
["lt ", "<"],
["gt ", ">"],
["ne ", "!="],
["eq ", "="],
["contains "],
["not_contains "],
["isin "],
["notin "],
["datestartswith "],
]
self.GraphTypeMap = {
"Bar": go.Bar,
"BarH": go.Bar,
"BarStacked": go.Bar,
"BarStackedH": go.Bar,
"Line": go.Scattergl,
"Area": go.Scatter,
"Scatter": go.Scattergl,
"Pie": go.Pie,
"Histogram": go.Histogram,
}
self.GraphModeMap = {
"Bar": "",
"BarH": "",
"BarStacked": "",
"BarStackedH": "",
"Pie": "",
"Histogram": "",
"Line": "lines",
"Area": "lines",
"Scatter": "markers",
}
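        # Illustrative note (hypothetical column names, not a code path from this file):
        # GraphTypeMap picks the plotly trace class and GraphModeMap its drawing mode, e.g.
        #   trace_cls = self.GraphTypeMap[self.GraphParams["GraphType"]]   # go.Scattergl for "Scatter"
        #   trace = trace_cls(x=df[xcol], y=df[ycol],
        #                     mode=self.GraphModeMap[self.GraphParams["GraphType"]])
        # so "Scatter" is drawn with markers while "Line"/"Area" use lines.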
self.aggregateFuncs = [
'mean',
'sum',
'count' ,
'std' ,
'var',
'sem',
'first',
'last',
'min',
'max'
]
self.NumericaggregateFuncs = [
'mean',
'sum',
'std' ,
'var',
'sem',
]
self.GraphParamsOrder2 = [
"Xaxis",
"GraphType",
"Primary_Yaxis",
"Primary_Legends",
"Aggregate_Func"
]
self.AvailableDataTypes= {
'string':str,
'int' : int,
'float': float,
'datetime' : 'datetime64[ns]',
'datetime_custom_format' : 'datetime64[ns]',
'boolean': bool
}
self.separatorMap={
"<tab>": "\t",
"<space>" : " ",
",<comma>": ",",
";<semi-colon>": ";",
":<colon>": ":",
"#<hash>": "#",
}
self.GraphParamsOrder = self.GraphParamsOrder2 + [ "Secondary_Legends"]
def read_file_in_df(self, FileInfo):
dtypes=self.loadMetadata(self.default_df_index,'ColumnsDataTypes')
mtime = os.path.getmtime(FileInfo['Path'])
if mtime > FileInfo['LastModified']:
Info("Reading file " + str(FileInfo['Path']) + " skiprows=" + str(FileInfo['SkipRows']) )
FileInfo['LastModified'] = mtime
if FileInfo['isXlsx']:
                if FileInfo['Sheet'] is None:
raise ValueError("SheetName is not defined")
df=pd.read_excel(FileInfo['Path'],sheet_name=FileInfo['Sheet'],skiprows=FileInfo['SkipRows'],dtype=dtypes)
df.columns = df.columns.astype(str)
DebugMsg3("DF head=", df.head())
else:
DebugMsg3("Reading File123")
sep= FileInfo['Sheet']
                if FileInfo['Sheet'] is None:
raise ValueError("Separator is not defined")
df=pd.read_csv(FileInfo['Path'], sep=self.separatorMap[sep],skiprows=FileInfo['SkipRows'],dtype=dtypes)
df.columns = df.columns.astype(str)
replace_dict=dict()
if FileInfo['ReplaceWithNan'] is not None:
for nan_value in FileInfo['ReplaceWithNan'].split(","):
replace_dict[nan_value]=np.nan
df = df.replace(replace_dict)
df = df.convert_dtypes(convert_integer=False,convert_floating=False,convert_string=False)
df = df.replace({pd.NA: np.nan})
self.DF_read_copy[FileInfo['Path']] = self.update_dtypes(df)
else:
Info("File not changed")
return self.DF_read_copy[FileInfo['Path']].copy()
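    # Hedged mini-example (illustrative helper, not called by the app; pd/np are the
    # module-level pandas/numpy imports): the ReplaceWithNan handling above maps every
    # comma-separated token to np.nan before dtypes are re-applied.
    def _example_replace_with_nan(self):
        tokens = "NA,-"  # hypothetical ReplaceWithNan value
        df = pd.DataFrame({"c": ["1", "NA", "-"]})
        return df.replace({t: np.nan for t in tokens.split(",")})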
def getDataFileName(self,datafile):
name= (datafile['Path'] + "#"
+ str(datafile['isXlsx']) + "#"
+ str(datafile['Sheet']) + "#"
+ str(datafile['SkipRows']) + "#"
+ str(datafile['ReplaceWithNan']) + "#"
)
return name
def update_df(self,Datafile,df_index):
self.df[df_index] = self.read_file_in_df(Datafile)
self.filtered_df[df_index] = self.df[df_index].copy()
self.plot_df[df_index]=self.filtered_df[df_index]
self.table_df=self.filtered_df[df_index]
def loadLastLoadedFiles(self):
filelist=dict()
if os.path.exists(self.RecentFilesListPath):
with open(self.RecentFilesListPath) as json_file:
filelist=json.load(json_file)
if "LastLoadedFile" in filelist:
for df_index in filelist["LastLoadedFile"]:
name=filelist["LastLoadedFile"][df_index]
self.DataFile[df_index]=filelist["recent"][name]
self.update_df(self.DataFile[df_index],df_index)
def updateRecentFiles(self,df_index):
filelist=dict()
if os.path.exists(self.RecentFilesListPath):
with open(self.RecentFilesListPath) as json_file:
filelist=json.load(json_file)
if "recent" not in filelist:
filelist["recent"]=dict()
if "LastLoadedFile" not in filelist:
filelist["LastLoadedFile"]=dict()
if self.DataFile[df_index] is not None:
name= self.getDataFileName(self.DataFile[df_index])
filelist["LastLoadedFile"][df_index]=name
filelist["recent"][name]=self.DataFile[df_index].copy()
filelist["recent"][name]['LastModified'] = 0
        else:
            filelist["LastLoadedFile"].pop(df_index, None)
with open(self.RecentFilesListPath, "w") as outfile:
json.dump(filelist,outfile)
def readFileInitDash(self,df_index):
if self.df[df_index] is None:
if self.DataFile[df_index] is not None:
self.df[df_index] = self.read_file_in_df(self.DataFile[df_index])
else:
self.df[df_index]=pd.DataFrame()
self.figs = dict()
def get_groupid(self, group):
return "TopLevelID"
# return "-".join(group)
def hasDuplicates(self,df):
s=set()
i=0
for x in df.index:
i+=1
s.add(str(list(df.loc[x])))
if len(s) < i:
return True
return False
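    # Hedged equivalent (illustrative helper, not wired into extract_data): pandas can
    # perform the same duplicate-row test vectorised, without stringifying rows into a set.
    def _example_has_duplicates(self, df):
        return bool(df.duplicated().any())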
def extract_data(self, df , keep_cols=[]):
if len(self.GraphParams["Xaxis"]) ==0 or ( '#index' in self.GraphParams["Xaxis"]):
df['#index']=df.index.copy()
self.GraphParams["Xaxis"]=['#index']
DebugMsg("Test1",self.GraphParams['Xaxis'])
DebugMsg("Test1",self.GraphParams['Primary_Legends'])
filters_tmp_p = list(OrderedDict.fromkeys(self.GraphParams["Xaxis"] + self.GraphParams["Primary_Legends"]))
filters_tmp_p2=list(OrderedDict.fromkeys(filters_tmp_p + keep_cols))
DebugMsg("Test1 df columns",df.columns)
DebugMsg("Test1 filters_tmp_p2",filters_tmp_p2)
DebugMsg("Test1 filters_tmp_p",filters_tmp_p)
DebugMsg("Test1 keep_cols",keep_cols)
DebugMsg("Test1 Primary_Yaxis",self.GraphParams["Primary_Yaxis"])
DebugMsg("Test1 Scatter_Labels",self.GraphParams["Scatter_Labels"])
DebugMsg("Test1 Aggrega",self.GraphParams["Aggregate_Func"])
df1 = None
if len(self.GraphParams["Primary_Yaxis"]) > 0:
df_p = None
reqd_cols= list(OrderedDict.fromkeys(filters_tmp_p2 + self.GraphParams["Primary_Yaxis"] + self.GraphParams["Scatter_Labels"])) ## make list unique preserving order
if self.aggregate:
# for col in self.GraphParams["Primary_Legends"]:
# df[col] = df[col].astype(str).replace("nan", "#blank")
for col in (keep_cols + self.GraphParams["Scatter_Labels"] + self.GraphParams["Primary_Yaxis"]):
if col not in filters_tmp_p:
if self.GraphParams['Aggregate_Func'] in self.NumericaggregateFuncs:
df[col]=pd.to_numeric(df[col],errors='coerce')
df_p = (
df[ reqd_cols].groupby(filters_tmp_p)
.agg(self.GraphParams['Aggregate_Func'])
)
df_p=df_p.reset_index()
df_p=df_p[reqd_cols]
else:
if self.GraphParams['GraphType'] != 'Scatter' and self.hasDuplicates(df[filters_tmp_p]):
raise ValueError("Data contains duplicate values, Please use Aggregated Functions or plot a scatter chart")
df_p = df[reqd_cols]
#pass
df1 = df_p
DebugMsg("Test1 Aggrega",self.GraphParams["Aggregate_Func"])
# fig = make_subplots()
if df1 is not None:
if len(self.GraphParams["Xaxis"]) > 1:
self.newXAxisColName = "#" + "-".join(self.GraphParams["Xaxis"])
df1[self.newXAxisColName] = ""
df1 = df1.sort_values(by=self.GraphParams["Xaxis"])
for col in self.GraphParams["Xaxis"]:
df1[self.newXAxisColName] = (
df1[self.newXAxisColName] + df1[col].astype(str) + ","
)
elif len(self.GraphParams["Xaxis"])==1:
self.newXAxisColName = self.GraphParams["Xaxis"][0]
else :
self.newXAxisColName = "#index"
df1[self.newXAxisColName]=df1.index.copy()
return df1
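    # Hedged toy version of the aggregation branch in extract_data (made-up columns;
    # pd is the module-level pandas import): group by the X-axis/legend columns and
    # apply the selected aggregate function to the Y columns.
    def _example_aggregate(self):
        df = pd.DataFrame({"step": ["a", "a", "b"], "MAX_MEM": [1.0, 3.0, 5.0]})
        return df[["step", "MAX_MEM"]].groupby(["step"]).agg("mean").reset_index()  # a->2.0, b->5.0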
def split_filter_part(self,filter_part):
for operator_type in self.operators:
for operator in operator_type:
if operator in filter_part:
ret_operator=operator_type[0].strip()
name_part, value_part = filter_part.split(operator, 1)
name = name_part[name_part.find("{") + 1 : name_part.rfind("}")]
value_part = value_part.strip()
v0 = value_part[0]
str_value=False
if v0 == value_part[-1] and v0 in ("'", '"', "`"):
value = value_part[1:-1].replace("\\" + v0, v0)
str_value=True
if ret_operator == 'contains' or ret_operator == 'not_contains':
value = str(value_part)
elif ret_operator == 'isin' or ret_operator == 'notin':
value = value_part.split(",")
elif not str_value:
try:
value = float(value_part)
except ValueError:
value = value_part
# word operators need spaces after them in the filter string,
# but we don't want these later
return name, ret_operator, value
return [None] * 3
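    # Hedged usage sketch (hypothetical filter fragments in Dash DataTable filter syntax):
    #   split_filter_part('{MAX_MEM} ge 100')        -> ('MAX_MEM', 'ge', 100.0)
    #   split_filter_part('{cluster} contains abc')  -> ('cluster', 'contains', 'abc')
    #   split_filter_part('no operator here')        -> [None, None, None]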
def create_eval_func(self,df,filter_expr):
retval=filter_expr
DebugMsg("Filter Expr init: " , retval)
matches= re.findall("(\{)(\S*?)(}\s+contains\s+)(\"!\s+)(\S*)(\")",retval)
for groups in matches:
if
|
is_string_dtype(df[groups[1]])
|
pandas.api.types.is_string_dtype
|
"""Test the percentage column difference transformer."""
import numpy as np
import numpy.testing as nt
import pandas as pd
import pandas.testing as pt
import pytest
import src.preprocessing as pp
@pytest.fixture
def data():
data = {
'f1': np.array([100, 110, 98, 1500, 30]),
'f2': 100 * np.ones((5, )),
'f3': np.zeros((5, )),
'target1': 100 + np.arange(5),
'target2': 200 + np.arange(5),
}
return
|
pd.DataFrame(data)
|
pandas.DataFrame
|
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
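# Hedged sketch (an assumption about the wiring, not code from this module): factories
# like _make_comparison_op are normally attached to an Index class in bulk, so that
# `idx == other` dispatches through cmp_method defined above. `operator` is the stdlib
# module imported at the top of this file.
def _add_comparison_methods_sketch(cls):
    for op in (operator.eq, operator.ne, operator.lt,
               operator.le, operator.gt, operator.ge):
        setattr(cls, '__{name}__'.format(name=op.__name__),
                _make_comparison_op(op, cls))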
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
    --------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or
|
is_categorical_dtype(dtype)
|
pandas.core.dtypes.common.is_categorical_dtype
|
# coding: utf8
import os
from multiprocessing.pool import ThreadPool
from os import path
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
from clinica.pipelines.machine_learning import base
class KFoldCV(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
skf = StratifiedKFold(
n_splits=self._validation_params["n_folds"], shuffle=True
)
self._validation_params["splits_indices"] = list(
skf.split(np.zeros(len(y)), y)
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_folds"]):
train_index, test_index = self._validation_params["splits_indices"][i]
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for i in range(self._validation_params["n_folds"]):
self._validation_results.append(async_result[i].get())
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
self._validation_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
subjects_folds = []
results_folds = []
container_dir = path.join(output_dir, "folds")
if not path.exists(container_dir):
os.makedirs(container_dir)
for i in range(len(self._validation_results)):
subjects_df = pd.DataFrame(
{
"y": self._validation_results[i]["y"],
"y_hat": self._validation_results[i]["y_hat"],
"y_index": self._validation_results[i]["y_index"],
}
)
subjects_df.to_csv(
path.join(container_dir, "subjects_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
subjects_folds.append(subjects_df)
# fmt: off
results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[i]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[i]["auc"],
"accuracy": self._validation_results[i]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[i]["evaluation"]["sensitivity"],
"specificity": self._validation_results[i]["evaluation"]["specificity"],
"ppv": self._validation_results[i]["evaluation"]["ppv"],
"npv": self._validation_results[i]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[i]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[i]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[i]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[i]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[i]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[i]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
results_df.to_csv(
path.join(container_dir, "results_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
results_folds.append(results_df)
all_subjects = pd.concat(subjects_folds)
all_subjects.to_csv(
path.join(output_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results = pd.concat(results_folds)
all_results.to_csv(
path.join(output_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results = pd.DataFrame(
all_results.apply(np.nanmean).to_dict(),
columns=all_results.columns,
index=[
0,
],
)
mean_results.to_csv(
path.join(output_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
print("Mean results of the classification:")
print(
"Balanced accuracy: %s"
% (mean_results["balanced_accuracy"].to_string(index=False))
)
print("specificity: %s" % (mean_results["specificity"].to_string(index=False)))
print("sensitivity: %s" % (mean_results["sensitivity"].to_string(index=False)))
print("auc: %s" % (mean_results["auc"].to_string(index=False)))
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_folds": 10,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
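# Hedged standalone sketch (toy data, not clinica's API) of the split-then-thread pattern
# used by KFoldCV.validate above: stratified folds are built once, each fold's evaluation
# is submitted to the ThreadPool, and results are collected back in fold order.
# np, ThreadPool and StratifiedKFold are the module-level imports above.
def _kfold_threadpool_sketch():
    def evaluate(train_index, test_index):  # stand-in for self._ml_algorithm.evaluate
        return {"n_train": len(train_index), "n_test": len(test_index)}

    y = np.array([0, 0, 0, 1, 1, 1])
    splits = list(StratifiedKFold(n_splits=3, shuffle=True).split(np.zeros(len(y)), y))
    pool = ThreadPool(2)
    async_result = {i: pool.apply_async(evaluate, split) for i, split in enumerate(splits)}
    pool.close()
    pool.join()
    return [async_result[i].get() for i in range(len(splits))]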
class RepeatedKFoldCV(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
self._validation_params["splits_indices"] = []
for i in range(self._validation_params["n_iterations"]):
skf = StratifiedKFold(
n_splits=self._validation_params["n_folds"], shuffle=True
)
self._validation_params["splits_indices"].append(
list(skf.split(np.zeros(len(y)), y))
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for r in range(self._validation_params["n_iterations"]):
async_result[r] = {}
self._validation_results.append([])
for i in range(self._validation_params["n_folds"]):
train_index, test_index = self._validation_params["splits_indices"][r][
i
]
async_result[r][i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for r in range(self._validation_params["n_iterations"]):
for i in range(self._validation_params["n_folds"]):
self._validation_results[r].append(async_result[r][i].get())
# TODO Find a better way to estimate best parameter
flat_results = [result for fold in self._validation_results for result in fold]
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
flat_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
all_results_list = []
all_subjects_list = []
for iteration in range(len(self._validation_results)):
iteration_dir = path.join(output_dir, "iteration-" + str(iteration))
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_subjects_list = []
iteration_results_list = []
folds_dir = path.join(iteration_dir, "folds")
if not path.exists(folds_dir):
os.makedirs(folds_dir)
for i in range(len(self._validation_results[iteration])):
subjects_df = pd.DataFrame(
{
"y": self._validation_results[iteration][i]["y"],
"y_hat": self._validation_results[iteration][i]["y_hat"],
"y_index": self._validation_results[iteration][i]["y_index"],
}
)
subjects_df.to_csv(
path.join(folds_dir, "subjects_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_subjects_list.append(subjects_df)
# fmt: off
results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[iteration][i]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[iteration][i]["auc"],
"accuracy": self._validation_results[iteration][i]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[iteration][i]["evaluation"]["sensitivity"],
"specificity": self._validation_results[iteration][i]["evaluation"]["specificity"],
"ppv": self._validation_results[iteration][i]["evaluation"]["ppv"],
"npv": self._validation_results[iteration][i]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[iteration][i]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[iteration][i]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[iteration][i]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[iteration][i]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[iteration][i]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[iteration][i]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
results_df.to_csv(
path.join(folds_dir, "results_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_results_list.append(results_df)
iteration_subjects_df = pd.concat(iteration_subjects_list)
iteration_subjects_df.to_csv(
path.join(iteration_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_subjects_list.append(iteration_subjects_df)
iteration_results_df = pd.concat(iteration_results_list)
iteration_results_df.to_csv(
path.join(iteration_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
iteration_results_df.apply(np.nanmean).to_dict(),
columns=iteration_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(iteration_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_list.append(mean_results_df)
all_subjects_df = pd.concat(all_subjects_list)
all_subjects_df.to_csv(
path.join(output_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_df = pd.concat(all_results_list)
all_results_df.to_csv(
path.join(output_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
all_results_df.apply(np.nanmean).to_dict(),
columns=all_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(output_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
print("Mean results of the classification:")
print(
"Balanced accuracy: %s"
% (mean_results_df["balanced_accuracy"].to_string(index=False))
)
print(
"specificity: %s" % (mean_results_df["specificity"].to_string(index=False))
)
print(
"sensitivity: %s" % (mean_results_df["sensitivity"].to_string(index=False))
)
print("auc: %s" % (mean_results_df["auc"].to_string(index=False)))
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_iterations": 100,
"n_folds": 10,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
class RepeatedHoldOut(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
splits = StratifiedShuffleSplit(
n_splits=self._validation_params["n_iterations"],
test_size=self._validation_params["test_size"],
)
self._validation_params["splits_indices"] = list(
splits.split(np.zeros(len(y)), y)
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_iterations"]):
train_index, test_index = self._validation_params["splits_indices"][i]
if self._validation_params["inner_cv"]:
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
else:
async_result[i] = async_pool.apply_async(
self._ml_algorithm.evaluate_no_cv, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for i in range(self._validation_params["n_iterations"]):
self._validation_results.append(async_result[i].get())
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
self._validation_results
)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
all_results_list = []
all_train_subjects_list = []
all_test_subjects_list = []
for iteration in range(len(self._validation_results)):
iteration_dir = path.join(output_dir, "iteration-" + str(iteration))
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_train_subjects_df = pd.DataFrame(
{
"iteration": iteration,
"y": self._validation_results[iteration]["y_train"],
"y_hat": self._validation_results[iteration]["y_hat_train"],
"subject_index": self._validation_results[iteration]["x_index"],
}
)
iteration_train_subjects_df.to_csv(
path.join(iteration_dir, "train_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_train_subjects_list.append(iteration_train_subjects_df)
iteration_test_subjects_df = pd.DataFrame(
{
"iteration": iteration,
"y": self._validation_results[iteration]["y"],
"y_hat": self._validation_results[iteration]["y_hat"],
"subject_index": self._validation_results[iteration]["y_index"],
}
)
iteration_test_subjects_df.to_csv(
path.join(iteration_dir, "test_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_test_subjects_list.append(iteration_test_subjects_df)
# fmt: off
iteration_results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[iteration]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[iteration]["auc"],
"accuracy": self._validation_results[iteration]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[iteration]["evaluation"]["sensitivity"],
"specificity": self._validation_results[iteration]["evaluation"]["specificity"],
"ppv": self._validation_results[iteration]["evaluation"]["ppv"],
"npv": self._validation_results[iteration]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[iteration]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[iteration]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[iteration]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[iteration]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[iteration]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[iteration]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
iteration_results_df.to_csv(
path.join(iteration_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
# mean_results_df = pd.DataFrame(iteration_results_df.apply(np.nanmean).to_dict(),
# columns=iteration_results_df.columns, index=[0, ])
# mean_results_df.to_csv(path.join(iteration_dir, 'mean_results.tsv'),
# index=False, sep='\t', encoding='utf-8')
all_results_list.append(iteration_results_df)
all_train_subjects_df = pd.concat(all_train_subjects_list)
all_train_subjects_df.to_csv(
path.join(output_dir, "train_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_test_subjects_df = pd.concat(all_test_subjects_list)
all_test_subjects_df.to_csv(
path.join(output_dir, "test_subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_df = pd.concat(all_results_list)
all_results_df.to_csv(
path.join(output_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
all_results_df.apply(np.nanmean).to_dict(),
columns=all_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(output_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
print("Mean results of the classification:")
print(
"Balanced accuracy: %s"
% (mean_results_df["balanced_accuracy"].to_string(index=False))
)
print(
"specificity: %s" % (mean_results_df["specificity"].to_string(index=False))
)
print(
"sensitivity: %s" % (mean_results_df["sensitivity"].to_string(index=False))
)
print("auc: %s" % (mean_results_df["auc"].to_string(index=False)))
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_iterations": 100,
"test_size": 0.2,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
class LearningCurveRepeatedHoldOut(base.MLValidation):
def validate(self, y):
if self._validation_params["splits_indices"] is None:
splits = StratifiedShuffleSplit(
n_splits=self._validation_params["n_iterations"],
test_size=self._validation_params["test_size"],
)
self._validation_params["splits_indices"] = list(
splits.split(np.zeros(len(y)), y)
)
async_pool = ThreadPool(self._validation_params["n_threads"])
async_result = {}
for i in range(self._validation_params["n_iterations"]):
train_index, test_index = self._validation_params["splits_indices"][i]
async_result[i] = {}
skf = StratifiedKFold(
n_splits=self._validation_params["n_learning_points"], shuffle=False
)
inner_cv_splits = list(
skf.split(np.zeros(len(y[train_index])), y[train_index])
)
for j in range(self._validation_params["n_learning_points"]):
inner_train_index = np.concatenate(
[indexes[1] for indexes in inner_cv_splits[: j + 1]]
).ravel()
async_result[i][j] = async_pool.apply_async(
self._ml_algorithm.evaluate,
(train_index[inner_train_index], test_index),
)
async_pool.close()
async_pool.join()
for j in range(self._validation_params["n_learning_points"]):
learning_point_results = []
for i in range(self._validation_params["n_iterations"]):
learning_point_results.append(async_result[i][j].get())
self._validation_results.append(learning_point_results)
self._classifier = []
self._best_params = []
for j in range(self._validation_params["n_learning_points"]):
classifier, best_params = self._ml_algorithm.apply_best_parameters(
self._validation_results[j]
)
self._classifier.append(classifier)
self._best_params.append(best_params)
return self._classifier, self._best_params, self._validation_results
def save_results(self, output_dir):
if self._validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
for learning_point in range(self._validation_params["n_learning_points"]):
all_results_list = []
all_subjects_list = []
learning_point_dir = path.join(
output_dir, "learning_split-" + str(learning_point)
)
for iteration in range(self._validation_params["n_iterations"]):
iteration_dir = path.join(
learning_point_dir, "iteration-" + str(iteration)
)
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_subjects_df = pd.DataFrame(
{
"y": self._validation_results[learning_point][iteration]["y"],
"y_hat": self._validation_results[learning_point][iteration][
"y_hat"
],
"y_index": self._validation_results[learning_point][iteration][
"y_index"
],
}
)
iteration_subjects_df.to_csv(
path.join(iteration_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_subjects_list.append(iteration_subjects_df)
# fmt: off
iteration_results_df = pd.DataFrame(
{
"balanced_accuracy": self._validation_results[learning_point][iteration]["evaluation"]["balanced_accuracy"],
"auc": self._validation_results[learning_point][iteration]["auc"],
"accuracy": self._validation_results[learning_point][iteration]["evaluation"]["accuracy"],
"sensitivity": self._validation_results[learning_point][iteration]["evaluation"]["sensitivity"],
"specificity": self._validation_results[learning_point][iteration]["evaluation"]["specificity"],
"ppv": self._validation_results[learning_point][iteration]["evaluation"]["ppv"],
"npv": self._validation_results[learning_point][iteration]["evaluation"]["npv"],
"train_balanced_accuracy": self._validation_results[learning_point][iteration]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._validation_results[learning_point][iteration]["evaluation_train"]["accuracy"],
"train_sensitivity": self._validation_results[learning_point][iteration]["evaluation_train"]["sensitivity"],
"train_specificity": self._validation_results[learning_point][iteration]["evaluation_train"]["specificity"],
"train_ppv": self._validation_results[learning_point][iteration]["evaluation_train"]["ppv"],
"train_npv": self._validation_results[learning_point][iteration]["evaluation_train"]["npv"],
},
index=["i"],
)
# fmt: on
iteration_results_df.to_csv(
path.join(iteration_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
iteration_results_df.apply(np.nanmean).to_dict(),
columns=iteration_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(iteration_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_list.append(mean_results_df)
all_subjects_df = pd.concat(all_subjects_list)
all_subjects_df.to_csv(
path.join(learning_point_dir, "subjects.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
all_results_df = pd.concat(all_results_list)
all_results_df.to_csv(
path.join(learning_point_dir, "results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
mean_results_df = pd.DataFrame(
all_results_df.apply(np.nanmean).to_dict(),
columns=all_results_df.columns,
index=[
0,
],
)
mean_results_df.to_csv(
path.join(learning_point_dir, "mean_results.tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
@staticmethod
def get_default_parameters():
parameters_dict = {
"n_iterations": 100,
"test_size": 0.2,
"n_learning_points": 10,
"n_threads": 15,
"splits_indices": None,
"inner_cv": True,
}
return parameters_dict
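# Hedged toy sketch (not clinica code) of the learning-curve split growth used in
# LearningCurveRepeatedHoldOut.validate: an inner StratifiedKFold chunks the training
# indices, and learning point j trains on the first j+1 chunks. np and StratifiedKFold
# are the module-level imports above.
def _learning_curve_chunks_sketch():
    y = np.array([0, 1] * 10)                                   # 20 toy labels
    skf = StratifiedKFold(n_splits=4, shuffle=False)
    chunks = [test_idx for _, test_idx in skf.split(np.zeros(len(y)), y)]
    return [np.concatenate(chunks[: j + 1]) for j in range(4)]  # sizes 5, 10, 15, 20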
class RepeatedKFoldCV_Multiclass(base.MLValidation):
def __init__(self, ml_algorithm):
self._ml_algorithm = ml_algorithm
self._repeated_validation_results = []
self._classifier = None
self._best_params = None
self._cv = None
def validate(self, y, n_iterations=100, n_folds=10, n_threads=15):
        async_pool = ThreadPool(n_threads)
async_result = {}
self._cv = []
for r in range(n_iterations):
skf = StratifiedKFold(n_splits=n_folds, shuffle=True)
self._cv.append(list(skf.split(np.zeros(len(y)), y)))
async_result[r] = {}
self._repeated_validation_results.append([])
for i in range(n_folds):
train_index, test_index = self._cv[r][i]
async_result[r][i] = async_pool.apply_async(
self._ml_algorithm.evaluate, (train_index, test_index)
)
async_pool.close()
async_pool.join()
for r in range(n_iterations):
for i in range(n_folds):
self._repeated_validation_results[r].append(async_result[r][i].get())
# TODO Find a better way to estimate best parameter
flat_results = [
result for fold in self._repeated_validation_results for result in fold
]
self._classifier, self._best_params = self._ml_algorithm.apply_best_parameters(
flat_results
)
return self._classifier, self._best_params, self._repeated_validation_results
def save_results(self, output_dir):
if self._repeated_validation_results is None:
raise Exception(
"No results to save. Method validate() must be run before save_results()."
)
all_results_list = []
all_subjects_list = []
for iteration in range(len(self._repeated_validation_results)):
iteration_dir = path.join(output_dir, "iteration-" + str(iteration))
if not path.exists(iteration_dir):
os.makedirs(iteration_dir)
iteration_subjects_list = []
iteration_results_list = []
folds_dir = path.join(iteration_dir, "folds")
if not path.exists(folds_dir):
os.makedirs(folds_dir)
for i in range(len(self._repeated_validation_results[iteration])):
subjects_df = pd.DataFrame(
{
"y": self._repeated_validation_results[iteration][i]["y"],
"y_hat": self._repeated_validation_results[iteration][i][
"y_hat"
],
"y_index": self._repeated_validation_results[iteration][i][
"y_index"
],
}
)
subjects_df.to_csv(
path.join(folds_dir, "subjects_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_subjects_list.append(subjects_df)
# fmt: off
results_df = pd.DataFrame(
{
"balanced_accuracy": self._repeated_validation_results[iteration][i]["evaluation"]["balanced_accuracy"],
"accuracy": self._repeated_validation_results[iteration][i]["evaluation"]["accuracy"],
"train_balanced_accuracy": self._repeated_validation_results[iteration][i]["evaluation_train"]["balanced_accuracy"],
"train_accuracy": self._repeated_validation_results[iteration][i]["evaluation_train"]["accuracy"],
},
index=[
"i",
],
)
# fmt: on
results_df.to_csv(
path.join(folds_dir, "results_fold-" + str(i) + ".tsv"),
index=False,
sep="\t",
encoding="utf-8",
)
iteration_results_list.append(results_df)
iteration_subjects_df =
|
pd.concat(iteration_subjects_list)
|
pandas.concat
|
#demographic data
#metrics:
# population
# wealth index
# trump-vote
import requests
import json
import pandas as pd
import csv
from pandas.io.json import json_normalize
import numpy as np
def read_move_data():
move_data = pd.DataFrame()
ds = pd.read_json("move.json")
ds.columns = ['county', 'move_index', 'state',
'sab', 'fips']
move_data = move_data.append(ds)
return move_data
def read_vote_data():
votes = pd.DataFrame()
ds = pd.read_csv("trump-vote.csv", header = 0)
ds.columns = ["index", "votes_dem", "votes_gop", "total_votes",
"per_dem", "per_gop", "diff", "per_point_diff",
"state_abbr", "county_name", "fips"]
votes = votes.append(ds)
return votes
def read_income_data():
wealth = pd.DataFrame()
ds =
|
pd.read_csv("income.csv")
|
pandas.read_csv
|
"""
.. module:: assembler
:synopsis: assemble genes from RNASeq data (normalized genome coverage (bigwig) and junctions)
.. moduleauthor:: <NAME> <<EMAIL>>
"""
# system imports
import subprocess
import multiprocessing
import gzip
import os
import time
from functools import reduce
from operator import iadd, iand
from collections import Counter
from itertools import repeat
import logging
logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)
# 3rd party imports
import pandas as PD
import numpy as N
import matplotlib.pyplot as P
# library imports
from jgem import utils as UT
from jgem import bedtools as BT
from jgem import bigwig as BW
from jgem import gtfgffbed as GGB
from jgem import filenames as FN
from jgem import graph as GP
from jgem import calccov as CC
from jgem import convert as CV
# Assemblers ###############################################################
PARAMS = dict(
merging=False,
genome='mm10', # genome id
selectsj_ratio=1e-3, # ratio for selecting overlapping junctions
checksjsupport=False, # whether check splice junctions without read support (necessary when merging or binth>0)
binth=0, # bigwig to bed threshold
jie_binth=10, #16,
jie_sjth=100,
#jie_ratio=0.01, # junctions in a exon ratio to surrounding junctions which define the exon
ureadth=0, # SJ uniq read (can be -1 to use uread=0 but mread>0)
mreadth=0, # SJ non-uniq read threshold
# SJ is selected as (uread>ureadth | mread>mreadth), so ureadth should really be >=0
maxexonsize=35000,# max exon size (Ttn has ~20kbp exon)
edgesize=100, # temporary edge exon size
mpth=0.95, # mapped% th for detecting gene boundary
cutlen=350, # check candidate bounded exons larger than this for cutting
gap=50,# fill this gap to distinguish gene boundary vs. exon (fill exon but not intergenic)
gap5=50, #150,#300, # for 5' UTR extension
gap3=50, #500, #1000,# for 3' UTR extension
covfactor=0.1, # for gap filling: if coverage of the next interval is < covfactor*current cov
# then don't fill
binstrand='.',
    iret_mpth=0.98,# mapped% th for detecting intron retention
iret_covratio=0.01, # min cov ratio between an iret and average of surrounding exons
iret_covth=0.1, #0.005, # if intron cov smaller than this, then ignore
findsecovth=True, # whether to use adaptive secovth (pndr2)
secov_fpr_th=0.001, # FPR
minsecovth=5, #0.1,# minimum single exon coverage (normalized to million alignments)
secovth=10, #0.5, # default SE cov threshold if not using adaptive version (pndr1)
se_gap=170, #50,# single exon gap fill
se_sizeth=50,# single exon size th
se_sizeth2=200, # for SELECTSEME
se_binth=0.2, #0.01,
# adaptive threshold is calculated and bigger of this and calculated value is used
# se_th99, FINDSE_secovth in stats is the calculated value
findsecovth_useref=True, # whether to use ref for finding secov, if not use ME
savepndr1=True, # whether to save pndr1 (no adaptive secovth) when doing pndr2
find53ir_covratio=0.2, # cov ratio threshold for FIND53IR
    find53ir_covth=0.6, #0.03, # cov threshold for FIND53IR
remove_overlappingse=True,# whether to remove SE overlapping to ME
remove_bad2exon=True,# whether to perform bad 2 exon gene removal
me2exon_sjth=2, # 2exon genes with splice junction support less than this will be removed
me2exon_sizeth=200,# 2exon genes terminal size th ~ 2 x read length
me2exon_covth=10, #0.5,# 2exon genes cov th
ed_window=15, # edge detector smooth window (in bp)
ed_minth=0.5, # edge detector minth for smoothed derivative
ed_sigma=3, # edge detector sigma for threshold (larger of minth, sigma*std is used)
ed_covratio=0.001, # edge detector if cov ratio to surrounds is < covratio, discard
# for abundant one covratio needs to be small to retrieve
ed_covth=2, #0.1,# edge detector abs cov threshold
ed_smwinsize=151, # smooth window for trimming
ed_minintsize=10, # window for merging peaks
ed_aggratio=0.1, #
ed_mimath=0.15, # min-max ratio threshold for cut decision
ed_mimath2=0.30,
ed_triggerth=2, # for rise detection (detect rise when >= max*mimath*triggerth after <max*mima)
#printerr=True, # mostly for FIXEDGES part
override=False,# override previously saved calculation
np=1, # number of process to spawn for connected component calculation
writegtf=False,
writeiso=False,
maxisonum=10,
useallconnected=True,
do_selectseme=False,
do_mergeexons=False,
clustersep_factor=1e-3,
)
# use different parameters for merging
MPARAMDIFF = dict(
merging = True,
checksjsupport=True,
binth=0, #0.001
mpth=0.9999,
gap=0,
gap5=1,
gap3=1,
# findsecovth=False, # 2016-04-30 turn it on
minsecovth=5,
# secovth=0.05,
# se_binth=0.01,
se_gap=0,
# se_sizeth=10,
ed_covratio=0.05,
ed_minth=0.05,
ed_mimath=0.20,
ed_mimath2=0.75,
ed_sigma=5,
# ed_covth=0.001,
iret_mpth=1, #0.9999,
iret_covratio=0.1,
# iret_covth=1, #0.01,
find53ir_covratio=0.15,
# find53ir_covth=0.03,
me_p0=-2,
me_p1=1,
me_p2=3.1,
do_selectseme=False,
do_iretinmerge=False,
)
MPARAMS = PARAMS.copy()
MPARAMS.update(MPARAMDIFF)
# for documentation purpose
PARAMSDOC = PARAMS.copy()
for k,v in MPARAMS.items():
PARAMSDOC[k+'_m'] = v
#LOG.debug(PARAMSDOC)
# [TODO] Better 5',3' detection, check all internal exon, not just cut ones
# (current source of 5',3': edge, cut exons, SE attach)
# [TODO] Replace WRITEGENES with make_unionex and unionex2bed12
# [TODO] Remove all part trying to use cache (previous calculation)
# Speed is not problem with multi CPU calculation anymore.
# Using previous leftover (often with error) is just a cause of another error.
class Assembler(object):
def __init__(self, fnobj, merging=False, saveintermediates=False, **kwargs):
"""
Args:
fnobj: FileNames object
merging (bool): whether merging assembled models or not (default False)
saveintermediates (bool): whether to save intermediates (default True)
kwargs: to change any of the parameter values
"""
self.fnobj=fnobj
if merging:
pr = MPARAMS.copy()
else:
pr = PARAMS.copy()
self.saveintermediates = saveintermediates
pr.update(kwargs)
self.params = pr
self.stats = {}
# def delete_intermediates(self):
# "Delete intermediate files."
# fn = self.fnobj
# categories = [x for x in fn._fnames.keys() if x !='output']
# fn.delete(delete=categories, protect=['output'])
def check_params(self):
"Check parameter change (if run previously) and save current parameter set."
fn = self.fnobj
pr = self.params.copy()
del pr['override']
del pr['np']
# parameter check
if (pr['binth']>0) or (pr['merging']):
# always do consistency check if binth>0 (since support may be reduced)
pr['checksjsupport'] = self.params['checksjsupport']=True
prdf = PD.DataFrame(pr,index=['value']).T
fname = fn.fname('assemble.params.txt', category='stats')
if os.path.exists(fname):
prdf0 = UT.read_pandas(fname,index_col=[0])
if len(prdf)==len(prdf0):
if not all(prdf['value'].astype(str) == prdf0['value'].astype(str)):
self.params['override']=True
LOG.warning('parameter different overriding...')
for x in prdf.index:
p1 = str(prdf.ix[x]['value'])
p2 = str(prdf0.ix[x]['value'])
if p1 != p2:
LOG.info(' {0}: {1} <=> {2}'.format(x,p1,p2))
else:
self.params['override']=True
LOG.warning('parameter set changed, overriding...')
p1 = set(prdf.index.values)
p2 = set(prdf0.index.values)
a = p1.difference(p2)
b = p2.difference(p1)
LOG.info(' old({0}) new({1})'.format(a,b))
# save parameters
UT.write_pandas(prdf, fname, 'ih')
def save_stats(self):
""" Saves assemble related stats into (samplename).assemble.stats.txt. """
df = PD.DataFrame(self.stats, index=['value']).T
fname = self.fnobj.fname('assemble.stats.txt',category='stats')
UT.write_pandas(df, fname, 'ih')
def assemble(self):
""" Perform the assembly """
fn = self.fnobj
pr = self.params
st = self.stats
self.check_params()
self.sj = GGB.read_bed(fn.sjfile)
if not pr['merging']:
SELECTSJ(self)()
REMOVEJIE(self)()
if pr['checksjsupport']:
CHECKSJSUPPORT(self)()
SJ2EX(self)()
# ADDJIE(self)() # no need to do this
if pr['do_mergeexons']:
MERGEEXONS(self)()
# CLUSTERSEPARATOR(self)()
if pr['merging']:
FINDEDGES2(self)()
FIXSTRAND(self)()
# FIND53IR only deals with exons with length > SE sizeth = 50bp)
# gap smaller than this will be missed if we skip FINDIRETS
if pr['do_iretinmerge']:
FINDIRETS(self)()
# FINDSECOVTH(self)() #
FIND53IR(self)() # max(minsecov, secovth) is used to get SE here
else:
FINDEDGES(self)()
FIXSTRAND(self)()
EDGEFIXER(self)()
FINDIRETS(self)()
FINDSECOVTH(self)()
FINDSE(self)()
CALCCOV(self)()
SETINFO(self)() # SET ACCEPTOR/DONOR/EXON CATEGORY
FINDGENES(self)()
if pr['do_selectseme']:
SELECTSEME(self)()
# SELECTSEME2(self)() # SELECT ME and SE, saves exname2, exname3, sjname2
if not pr['merging']:
FIXEDGES2(self)() # TRIM 3',5' edges
CONSISTENTSJ(self)() # remove sj without ex support
WRITESJEX(self)()
WRITEGENES(self)()
self.save_stats()
if not self.saveintermediates:
# self.delete_intermediates()
fn.delete(delete=['temp'], protect=['output','stats'])
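# Hedged sketch of the parameter-override mechanics in Assembler.__init__ (standalone,
# no FileNames plumbing involved): keyword arguments simply update a copy of PARAMS
# (or MPARAMS when merging=True).
def _params_override_sketch(**kwargs):
    pr = PARAMS.copy()
    pr.update(kwargs)        # e.g. _params_override_sketch(np=4, binth=0.5)
    return pr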
# assembler modules #######################################################
# def mp_worker(args):
# func, arg = args
# return func(*arg)
class SUBASE(object):
"""Base class of assembler modules."""
def __init__(self, asm):
self.asm = asm # assembler
self.fnobj = asm.fnobj # FileNames object
self.params = asm.params # dict
self.stats = asm.stats # dict
self.info = ''
def __call__(self, *args, **kwargs):
LOG.info('{0} '.format(self.__class__.__name__)+'='*20)
_sttime = time.time()
rslt = self.call(*args, **kwargs)
if self.info:
LOG.info(' '+self.info)
LOG.info(' time: {0:.3f}s'.format(time.time()-_sttime))
def call(self, *args, **kwargs):
raise NotImplementedError
def bw2bed(self, binth):
fn = self.fnobj
pr = self.params
binfile = BW.bw2bed(
bwfile=fn.bwfile,
bedfile=fn.bedname2('bw',binth),
chroms=UT.chroms(pr['genome']),
th=binth
)
return binfile
def fillgap(self, gap, binfile):
fn = self.fnobj
if gap==0:
gapfile=binfile
else:
gapfile = BT.fillgap(binfile,fn.bedname2('gap', gap), gap=gap)
return gapfile
def chroms(self, df):
pr = self.params
chroms0 = set(df['chr'].unique())
return [x for x in UT.chroms(pr['genome']) if x in chroms0]
# def _process_mp(self, func, args):
# np = self.params['np']
# rslts = []
# if np==1:
# for i, arg in enumerate(args):
# rslts += func(*arg)
# LOG.debug(' processing: {0}/{1}...'.format(i+1,len(args)))
# else:
# try:
# p = multiprocessing.Pool(np)
# a = zip(repeat(func), args)
# tmp = p.map(mp_worker, a)
# finally:
# LOG.debug('closing pool')
# p.close()
# rslts = reduce(iadd, tmp)
# return rslts
def sjfile(self):
fn = self.fnobj
pr = self.params
fname0 = fn.bedname('checksjsupport.sj')
fname1 = fn.bedname('fixstrand.sj')
if os.path.exists(fname1):
return fname1
if pr['checksjsupport'] and os.path.exists(fname0):
return fname0
return fn.sjfile
class SELECTSJ(SUBASE):
"""Select splice junction based on ratio of junction counts within overlapping junctions.
Use ratio of unique counts and ratio of total counts (to incorporate cases where there are
only non-unique counts).
Args:
sj: junction DataFrame
Returns:
:sj: selected junctions dataframe
Related Parameters:
    * selectsj_ratio: threshold for the ratio, default:{selectsj_ratio}
Files:
* selectsj.bed.gz
* selectsj.inte.txt.gz
"""
def call(self):
sj = self.asm.sj
fn = self.fnobj
pr = self.params
a = b = GGB.write_bed(sj, fn.bedname('selectsj'), ncols=7)
c = fn.txtname('selectsj.inte')
c = BT.bedtoolintersect(a,b,c,wao=True) # somehow -s (force same strand) doesn't work
o = BT.read_ovl(c, GGB.BEDCOLS[:7])
idxstrand = o['strand']==o['b_strand']
LOG.debug(a)
LOG.debug(c)
LOG.debug(o.head())
sjgr = o.groupby(['chr','st','ed','strand'])
# BEDCOLUMNS sc1(5th), tst(7th) contains unique count (ucnt) and non-unique count (mcnt)
sj2 = sjgr[['name','sc1','tst']].first()
sj2['ucnt_sum'] = sjgr['b_sc1'].sum()
sj2['mcnt_sum'] = sjgr['b_tst'].sum()
sj2['sum'] = sj2['ucnt_sum']+sj2['mcnt_sum']
sj2['cnt'] = sj2['sc1']+sj2['tst']
self.sj2 = sj2 = sj2.reset_index()
sj2['locus'] = UT.calc_locus_strand(sj2)
sj2['ratio'] = sj2['sc1']/sj2['ucnt_sum']
sj2['ratio_m'] = sj2['tst']/sj2['mcnt_sum']
sj2['ratio_a'] = sj2['cnt']/sj2['sum']
self.sj2 = sj2
UT.write_pandas(sj2, fn.txtname('selectsj.sj2',category='output'), 'h') # TODO change cat to 'temp'
# select
th_ratio = pr['selectsj_ratio']
idx1 = (sj2['ratio']>=th_ratio)|(sj2['ratio_a']>=th_ratio)
self.sj4 = sj4 = sj2[idx1]
self.info = '#sj:{0}=>{1}'.format(len(sj), len(sj4))
self.stats['SELECTSJ.#sj0'] = len(sj)
self.stats['SELECTSJ.#sj'] = len(sj4)
#return sj4
self.asm.sj = sj4
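# Hedged toy illustration of the SELECTSJ criterion (made-up numbers; column names mirror
# the ones built in call(), PD is the module-level pandas import): a junction is kept when
# its unique-count ratio or its total-count ratio against the summed counts of overlapping
# junctions reaches selectsj_ratio.
def _selectsj_ratio_sketch():
    sj2 = PD.DataFrame({'sc1': [500, 0], 'ucnt_sum': [500, 500],
                        'cnt': [600, 0], 'sum': [600, 600]})
    th = PARAMS['selectsj_ratio']
    keep = (sj2['sc1'] / sj2['ucnt_sum'] >= th) | (sj2['cnt'] / sj2['sum'] >= th)
    return sj2[keep]   # the zero-share junction is dropped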
class CHECKSJSUPPORT(SUBASE):
"""Check junction edges have >0 coverages. Remove junctions without support.
Args:
sj: junction DataFrame
Returns:
:sj: compatible junctions
Related Parameters:
* binth: coverage threshold, default:{binth}, (for merging: {binth_m})
* genome: genome version, default:{genome}
TempFiles:
* bw*.bed.gz
* sjst.bed.gz
* sjed.bed.gz
* sjst.ovl.txt.gz
* sjed.ovl.txt.gz
"""
def call(self):
sj = self.asm.sj
fn = self.fnobj
pr = self.params
# get bindf
binfile = self.bw2bed(pr['binth'])
# write st,ed from sjs
sj['st-1']=sj['st']-1
sj['st-2']=sj['st']-2
sj['ed+1']=sj['ed']+1
sj['_id']=N.arange(len(sj))
sjst = fn.bedname('checksjsupport.sjst')
sjed = fn.bedname('checksjsupport.sjed')
# BEDCOLS: chr,st,ed,name,sc1,strand,tst
# UT.write_pandas(sj[['chr','st-2','st-1','_id','sc1','strand','tst']],sjst,'')
# UT.write_pandas(sj[['chr','ed','ed+1','_id','sc1','strand','tst']],sjed,'')
UT.write_pandas(sj[['chr','st-2','st-1','_id']],sjst,'')
UT.write_pandas(sj[['chr','ed','ed+1','_id']],sjed,'')
stovl = fn.txtname('checksjsupport.sjst.ovl')
edovl = fn.txtname('checksjsupport.sjed.ovl')
# bedtools intersect to bindf
# ost = BT.calc_ovlratio(sjst, binfile, stovl, nacol=7, nbcol=3, idcol='name')
# oed = BT.calc_ovlratio(sjed, binfile, edovl, nacol=7, nbcol=3, idcol='name')
ost = BT.calc_ovlratio(sjst, binfile, stovl, nacol=4, nbcol=3, idcol='name')
oed = BT.calc_ovlratio(sjed, binfile, edovl, nacol=4, nbcol=3, idcol='name')
ost = ost.set_index('name')
oed = oed.set_index('name')
sjsupp = sj.set_index('_id')[(ost['ovlratio']>0)&(oed['ovlratio']>0)].copy()
self.info ='#sj: {0}=>{1}'.format(len(sj), len(sjsupp))
self.stats['CHECKSJSUPPORT.#sj'] = len(sj)
self.stats['CHECKSJSUPPORT.#sjsupp'] = len(sjsupp)
#return sjsupp
fn.write_bed(sjsupp, 'checksjsupport.sj', ncols=7)
self.asm.sj = sjsupp
class REMOVEJIE(SUBASE):
"""Remove Junctions In Exons. Often times exons with high coverage contain
noise junctions.
Args:
sj: junction DataFrame
Returns:
:sj: junction dataframe without JIE
:jie: junctions in exons
Related Parameters:
* jie_binth: coverage threshold, default:{jie_binth}
    * jie_sjth: threshold for normalized read counts, default:{jie_sjth}
TempFiles:
* bw*.bed.gz
* sjsupp.bed.gz
* jie.bw.ovl.txt.gz
"""
def call(self):
sj = self.asm.sj
sjfile = self.sjfile()
fn = self.fnobj
pr = self.params
stats = self.stats
acov = BW.get_totbp_covbp_bw(fn.bwfile, pr['genome'], ['chr1']).ix['acov'].values[0]
jie_binth = pr['jie_binth']*acov
jie_sjth = pr['jie_sjth'] #*acov # seems no need to scale sjth
stats['REMOVEJIE.jie_binth'] = jie_binth
stats['REMOVEJIE.jie_sjth'] = jie_sjth
stats['REMOVEJIE.acov'] = acov
# covarage file
binfile = self.bw2bed(jie_binth)
# if nothing in binfile then skip
try:
jiebw = GGB.read_bed(binfile)
except:
self.asm.jie = None
self.info = 'nothing above jie_binth {0}'.format(pr['jie_binth'])
stats['REMOVEJIE.#sj'] = len(sj)
stats['REMOVEJIE.#jie'] = 0
return
if len(jiebw)==0:
self.asm.jie = None
self.info = 'nothing above jie_binth {0}'.format(pr['jie_binth'])
stats['REMOVEJIE.#sj'] = len(sj)
stats['REMOVEJIE.#jie'] = 0
return
sjmp = BT.calc_ovlratio(
aname=sjfile,
bname=binfile,
tname=fn.txtname('removejie.bw.ovl'),
nacol=7, nbcol=3,
idcol=['chr','st','ed','strand']
)
# match records between sjmp and mg.sj
sjmp['str_id'] = UT.calc_locus(sjmp)
sj['str_id'] = UT.calc_locus(sj)
sid2ovl = UT.df2dict(sjmp, 'str_id','ovlratio')
sj['ovlratio'] = [sid2ovl.get(x,N.nan) for x in sj['str_id']]
# should use count ratios instead of actual reads as threshold ?
th = pr['jie_sjth']
idx = (sj['ovlratio']==1)&(sj['sc1']<th)&(sj['tst']<th)
sj1 = sj[~idx].copy() # use these for "nearest donor/acceptor" exon extraction
jie = sj[idx].copy() # junctions in exon, add later
self.info = '#sj:{0}=>{1}, jie {2}'.format(len(sj), len(sj1), len(jie))
stats['REMOVEJIE.#sj'] = len(sj1)
stats['REMOVEJIE.#jie'] = len(jie)
#return sj1, jie
self.asm.sj = sj1
self.asm.jie = jie
class SJ2EX(SUBASE):
"""Find candidate exons from junctions.
Args:
sj: junction DataFrame
Returns:
:me: exons dataframe
:sj: compatible junctions
Related Parameters:
* ureadth: threshold for junction unique counts, default:{ureadth} ({ureadth_m} for merging)
* mreadth: threshold for non-unique junction counts, default:{mreadth} ({mreadth_m} for merging)
* maxexonsize: maximum exon size, default:{maxexonsize}
* edgesize: temporary edge exon size, default:{edgesize}
"""
def call(self):
sj = self.asm.sj
fn = self.fnobj
pr = self.params
ureadth=pr['ureadth']
mreadth=pr['mreadth']
sj['st-1'] = sj['st']-1
sj['ureads'] = sj['sc1']
sj['mreads'] = sj['tst']
sj1 = sj[(sj['ureads']>ureadth)|(sj['mreads']>mreadth)].copy()
LOG.info('#sj:{0}=>{1} after ureadth, mreadth'.format(len(sj), len(sj1)))
self.stats['SJ2EX.#sj_before_uth_mth'] = len(sj)
self.stats['SJ2EX.#sj_after_uth_mth'] = len(sj1)
ex = PD.DataFrame([x for x in self._sj2ex(sj1)],columns=GGB.BEDCOLS[:6])
# there are small cases of duplicates
ex = ex.sort_values(['chr','st','ed','strand'])
ex['_ord'] = -ex['name'].str.len() # bring both-bounded [...] names to front (others lack brackets)
exg = ex.groupby(['chr','st','ed','_ord'])
ex = exg.first().reset_index()
self.info = '#ex:{0}, #sj:{1}'.format(len(ex), len(sj1))
self.stats['SJ2EX.#exon_candidates'] = len(ex)
#return ex, sj1
self.asm.me = ex
self.asm.sj = sj1
def _sj2ex(self, sj):
pr = self.params
maxsize=pr['maxexonsize']
if pr['merging']:
edgesize = maxsize
else:
edgesize=pr['edgesize']
counter = [0]
# workaround for nonlocal closure variable for Python2
# in Python3 use nonlocal keyword
def sj2expn(strand):
# strand +,-
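# exon name convention used below: a leading '[' means the left end is bounded by a junction,
# a trailing ']' means the right end is bounded; the letter encodes how the exon was made
# (s/e: one end is the artificial chromosome-side edge, f: nearest bound farther than
#  maxexonsize so an edgesize stub is used, a/b: bounded by junctions on both sides)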
df = sj[(sj['strand']==strand)]
# chrom wise
for chrom, g in df.groupby('chr'):
sts = sorted(set(g['st-1'].values)) # right end of exon (ed)
eds = sorted(set(g['ed'].values)) # left end of exon (st)
eds = [max(0,sts[0]-edgesize)]+list(eds)
nsts = len(sts)
neds = len(eds)
j = 0 # current left end pos
usededs = set()
for i in range(nsts): # go through right ends
while (eds[j+1]<sts[i]): # find nearest left end
j += 1
if j==0:
usededs.add(j)
counter[0] +=1
name = '{0}{1}s]'.format(strand,counter[0])
yield (chrom, eds[j], sts[i], name, 0, strand)
elif (sts[i]-eds[j])>maxsize: # too far
counter[0] +=1
name = '{0}{1}f]'.format(strand,counter[0])
yield (chrom, sts[i]-edgesize, sts[i], name, 0, strand)
else: # bounded on both
usededs.add(j)
counter[0] +=1
name = '[{0}{1}a]'.format(strand,counter[0])
yield (chrom, eds[j], sts[i], name, 0, strand)
unusededs = sorted(set(range(neds)).difference(usededs))
i = 0
sts = list(sts)+[eds[-1]+edgesize]
for j in unusededs:
if j==0: # dummy ed
continue
while (sts[i+1]<=eds[j]):
i += 1
if i+1==nsts:# len(sts)=nsts+1
counter[0] +=1
name = '[{0}{1}e'.format(strand,counter[0])
yield (chrom, eds[j], sts[i+1], name, 0, strand)
elif (sts[i+1]-eds[j])>maxsize: # too far
counter[0] +=1
name = '[{0}{1}f'.format(strand,counter[0])
yield (chrom, eds[j], eds[j]+edgesize, name, 0, strand)
else: # bounded on both sides
counter[0] +=1
name = '[{0}{1}b]'.format(strand,counter[0])
yield (chrom, eds[j], sts[i+1], name, 0, strand)
def sj2exns():
sj0 = sj
df = sj[(sj['strand']=='.')]
# chrom wise
for chrom, g in df.groupby('chr'):
sts = sorted(set(g['st-1'].values)) # right end of exon (ed) for '.'
sjchr = sj0[sj0['chr']==chrom]#.copy()
#sjchr['st'] = sjchr['st']-1
tmp = [tuple(x) for x in sjchr[['st-1','strand']].values.tolist()]
sts0 = sorted(set(tmp)) # all right end of exon (ed)+strand
tmp = [tuple(x) for x in sjchr[['ed','strand']].values.tolist()]
eds0 = sorted(set(tmp)) # all left end of exon (st)+strand
sts0 = sts0+[(eds0[-1][0]+edgesize,'.')]
eds0 = [(max(0,sts0[0][0]-edgesize),'.')]+eds0
nsts = len(sts)
j = 0 # current left end pos
usededs = set()
for i in range(nsts): # go through right ends
while (eds0[j+1][0]<sts[i]): # find nearest left end
j += 1
if j==0:
if eds0[j][1]=='.':
usededs.add(j)
counter[0] +=1
name = '.{0}s]'.format(counter[0])
yield (chrom, eds0[j][0], sts[i], name, 0, eds0[j][1])
elif (sts[i]-eds0[j][0])>maxsize: # too far
counter[0] +=1
name = '.{0}f]'.format(counter[0])
yield (chrom, sts[i]-edgesize, sts[i], name, 0, eds0[j][1])
else: # bounded on both
if eds0[j][1]=='.':
usededs.add(j)
counter[0] +=1
name = '[.{0}a]'.format(counter[0])
yield (chrom, eds0[j][0], sts[i], name, 0, eds0[j][1])
alleds = set([i for i,x in enumerate(eds0) if (x[1]=='.')&(i!=0)])
# eds0[0] is a dummy record => don't include
unusededs = sorted(alleds.difference(usededs))
i = 0
nsts0 = len(sts0)
for j in unusededs:
while (sts0[i+1][0]<=eds0[j][0]):
i += 1
if i==nsts0:
counter[0] +=1
name = '[.{0}e'.format(counter[0])
yield (chrom, eds0[j][0], sts0[i+1][0], name, 0, sts0[i+1][1])
elif (sts0[i+1][0]-eds0[j][0])>maxsize: # too far
counter[0] +=1
name = '[.{0}f'.format(counter[0])
yield (chrom, eds0[j][0], eds0[j][0]+edgesize, name, 0, eds0[j][1])
else: # bounded on both sides
counter[0] +=1
name = '[.{0}b]'.format(counter[0])
yield (chrom, eds0[j][0], sts0[i+1][0], name, 0, sts0[i+1][1])
for x in sj2expn('+'):
yield x
for x in sj2expn('-'):
yield x
for x in sj2exns():
yield x
LOG.debug(' total {0} exon candidates'.format(counter[0]))
class MERGEEXONS(SUBASE):
"""Merge overlapping exons.
Args:
me: exon DataFrame
Returns:
:me: exon dataframe with merged exons
TempFiles:
* sjex.bed.gz
* sjex.inte.txt.gz
"""
# TODO:
# Currently only two overlapping exons are merged. Generalize to n.
def call(self):
ex = self.asm.me
fn = self.fnobj
pr = self.params
# ex vs. ex overlap
a = b = GGB.write_bed(ex, fn.bedname('mergeexons.sjex'), ncols=6)
c = fn.txtname('mergeexons.sjex.inte')
c = BT.bedtoolintersect(a,b,c,wao=True) # somehow -s (force same strand) doesn't work
cols0 = GGB.BEDCOLS[:6]
o = BT.read_ovl(c, cols0)
idxstrand = o['strand']==o['b_strand']
# select ordered overlaps (to count overlap only once)
idx1 = o['st']<o['b_st']
idx2 = (o['st']==o['b_st'])&(o['ed']<o['b_ed'])
o = o[idxstrand&(idx1|idx2)] # unique overlap pairs
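# for each overlapping same-strand pair, emit merged exons extended to the partner's far
# boundary (one per direction, skipped when the boundaries already coincide)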
def _gen():
cols = ['chr','st','ed','strand','name','b_st','b_ed','b_name']
for c,s,e,t,n,bs,be,bn in UT.izipcols(o,cols):
if e!=be:
yield (c,s,be,n+'+'+bn,0,t)
if s!=bs:
yield (c,bs,e,bn+'+'+n,0,t)
mrgd = PD.DataFrame([x for x in _gen()], columns=cols0)
me = PD.concat([ex[cols0], mrgd[cols0]], ignore_index=True)
me = me.groupby(['chr','st','ed','strand']).first().reset_index()
self.info = '#ex:{0}'.format(len(me))
#return me
self.asm.me = me
class CLUSTERSEPARATOR(SUBASE):
"""Separate possibly different genes connected by proximity of acceptor/donor.
"""
# Regard gene as a connected graph, exons as nodes and junctions as edges.
# In a node, if a junction abundance is less than cluster_th*total_abundance,
# then duplicate the node and sever the connection.
# a new node connected to the non-abundant junction with either a_id or d_id set to null
# original node a_id or d_id will be set to null or not, depending on what's left there
# !!! new node also NEEDS to have a NEW a_id/d_id not overlapping with an existing one
# this id should also assigned to separated junction as well. And here after a_id/d_id
# should be carried over and never recalculated from position
# [TODO] the rest of the assembly procedures have to be modified to carry over
# a_id, d_id (both sj & ex) throughout
# Start from selecting abundant junctions; the nodes connected to them are the
# ones to check.
# Set "cat" at the end. Also make sure the name either starts with "[" or
# ends with "]" at the bounded side.
# ==> NEED a facility to NOT connect junction and exon even if they
def call(self):
### IN ###
sj = self.asm.sj
me = self.asm.me
fn = self.fnobj
pr = self.params
st = self.stats
##########
if '_id' not in sj:
UT.set_ids(sj)
UT.set_ids(me)
UT.set_exon_category(sj, me)
# select SJ
cf = pr['clustersep_factor'] # 1e-3
# how much ucnt to have *cf > 10?
th = 10/cf
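# i.e. keep junctions with ureads > 10/cf, so that a clustersep_factor fraction of their
# reads still exceeds ~10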
sj1 = sj[sj['ureads']>th]
LOG.debug('CLUSTERSEP.#sj1={0}'.format(len(sj1)))
# find exons to consider
aids = set(sj1['a_id'].values)
dids = set(sj1['d_id'].values)
idx = me['a_id'].isin(aids)|me['d_id'].isin(dids)
ex1 = me[idx] # these are connected to abundant junctions
ex2 = me[~idx] # don't have to process these
LOG.debug('CLUSTERSEP.#ex1={0}'.format(len(ex1)))
# check each exon
aid2sid = sj.groupby('a_id')['_id'].apply(lambda x: list(x))
did2sid = sj.groupby('d_id')['_id'].apply(lambda x: list(x))
if 'jcnt' not in sj:
sj['jcnt'] = [x or y for x,y in sj[['ureads','mreads']].values]
sid2ucnt = UT.df2dict(sj, '_id', 'jcnt')
cols = ['chr','st','ed','name','sc1', 'strand','a_id','d_id'] # a_id => 6, d_id =>7
caid = sj['a_id'].max()
cdid = sj['d_id'].max()
def _gen():
for ex in ex1[cols].values:
asids = aid2sid[ex[6]]
dsids = did2sid[ex[7]]
acnts = [sid2ucnt.get(x,0) for x in asids]
dcnts = [sid2ucnt.get(x,0) for x in dsids]
tot = N.sum(acnts) + N.sum(dcnts)
sjth = float(tot)*cf
atgts = [x for x,y in zip(asids,acnts) if y<sjth]
dtgts = [x for x,y in zip(dsids,dcnts) if y<sjth]
# a junctions < sjth?
if len(atgts)>0:
# make new node
nex = ex.copy()
# modify a_id
caid += 1
nex[6] = caid
sj.loc[sj['_id'].isin(atgts),'a_id'] = caid
nex[7] = 0 # separate from other set d_id to null
yield nex
if len(atgts)==len(asids): # all of a side are gone
ex[6] = 0 # set a_id of original to null
# d junctions < sjth?
if len(dtgts)>0:
nex = ex.copy()
cdid += 1
nex[7] = cdid
sj.loc[sj['_id'].isin(dtgts),'d_id'] = cdid
nex[6] = 0 # set a_id to null
yield nex
if len(dtgts)==len(dsids): # all of d side are gone
ex[7] = 0 # set d_id of the original null
yield ex
ex3 = PD.DataFrame([x for x in _gen()], columns=cols) # <= ex1
me = PD.concat([ex3[cols], ex2[cols]],ignore_index=True).sort_values(['chr','st','ed'])
# reset "cat" column
UT.set_exon_category(sj, me)
### OUT ###
fn.write_txt(sj, 'clustersep.sj')
fn.write_txt(me, 'clustersep.me')
self.asm.sj = sj
self.asm.me = me
###########
# [TODO] fix to use "cat" (from a_id, d_id) instead of looking into the exon name
#
class FINDEDGES2(SUBASE):
"""Find edges
Args:
sj: junction DataFrame
me: exon DataFrame
Returns:
:sj: compatible junctions
:me: exon dataframe with edge exons added
Related Parameters:
* mpth: mapped% th for detecting gene boundary, default {mpth}
* binth: bigwig to bed threshold, default {binth} (merging: {binth_m})
* gap: fill this gap to distinguish gene boundary vs. exon (fill exon but not intergenic), default {gap} (merging: {gap_m})
* edgesize: temporary edge exon size, default {edgesize}
* maxexonsize: max exon size (Ttn has ~20kbp exon), default {maxexonsize}
* cutlen: check candidate bounded exons larger than this for cutting, default {cutlen}
* ed_sigma: edge detector sigma for threshold (larger of minth, sigma*std is used),
default {ed_sigma} (merging: {ed_sigma_m})
* ed_minth: edge detector minth for smoothed derivative, default {ed_minth} (merging: {ed_minth_m})
* ed_covratio: edge detector discards a cut when its cov ratio to the surroundings is < covratio,
default {ed_covratio} (merging: {ed_covratio_m})
* ed_window: edge detector smooth window (in bp), default {ed_window}
* ed_covth: edge detector abs cov threshold, default {ed_covth} (merging: {ed_covth_m})
* ed_smwinsize: smooth window for trimming default {ed_smwinsize}
* ed_minintsize: window for merging peaks, default {ed_minintsize}
* ed_aggratio: default {ed_aggratio}
* ed_mimath: min-max ratio threshold for cut decision, default {ed_mimath} (merging: {ed_mimath_m})
* ed_mimath2: default {ed_mimath2} (merging: {ed_mimath2_m})
* ed_triggerth: for rise detection (detect rise when >= max*mimath*triggerth after <max*mima), default {ed_triggerth}
TempFiles:
* fe2.sjbb.bed.gz
* fe2.sjbb-ovl.txt.gz
* fe2.me1.ci.txt.gz
* fe2.me2p.ci.txt.gz
* fe2.me2n.ci.txt.gz
* fe2.sj.txt.gz
* fe2.me.txt.gz
"""
def call(self):
sj = self.asm.sj
me = self.asm.me
fn = self.fnobj
pr = self.params
st = self.stats
self.me = me
self.sj = sj
## "cat" filed?
if 'cat' not in me.columns:
UT.set_exon_category(sj,me)
# bounded (kind: a,b)=> cut
me['_id2'] = N.arange(len(me))
idx0 = me['name'].str.contains('s|e|f') # unbounded
idx1 = me['cat'].isin(['5','3','s'])
idx = idx0|idx1 # unbounded won't become bounded but bounded may have been separated
me1 = me[~idx] # bounded cut these
me2 = me[idx] # not (a,b) kind:(s,e,f) => fix (~20K)
# do it at the level of ci
ci = UT.chopintervals(me1, fn.txtname('findedges2.me1.ci'), idcol='_id2')
binfile = self.bw2bed(pr['binth'])
sjbbname = UT.write_pandas(ci[['chr','st','ed','name','id']], fn.bedname('findedges2.sjbb'), '')
bbg = BT.calc_ovlratio(
aname=sjbbname,
bname=binfile,
tname=fn.txtname('findedges2.sjbb-ovl'),
nacol=5, nbcol=3
)
# calculate mp and len
bbg['len'] = bbg['ed'] - bbg['st']
bbg['name1'] = bbg['name'].astype(str).apply(lambda x:[int(y) for y in x.split(',')])
# name1 contains list of ids
ci1 = bbg[(bbg['ovlratio']<pr['mpth'])|(bbg['len']>=pr['cutlen'])] # ~ 16K
# cut candidates
eids1 = reduce(iadd, ci1['name1'].values, []) # exons being cut
eids0 = sorted(set(me1['_id2'].values).difference(set(eids1)))
me1i = me1.set_index('_id2') # me1 (bounded) indexed
me1s = me1i.ix[eids1].reset_index() # cut targets
me0 = me1i.ix[eids0].reset_index() # remaining (non-cut) exons
LOG.debug('cutting {0} ci chrom-wise'.format(len(ci1)))
me1r = self.process_mp(ci1,me1s,cutedges_m)
# fix me2
me2p = me2[me2['name'].str.startswith('[')]
ci2p = UT.chopintervals(me2p, fn.txtname('findedges2.me2p.ci'), idcol='_id2')
ci2p = ci2p.rename(columns={'id':'sc1'})
ci2p['direction'] = '+'
me2n = me2[me2['name'].str.endswith(']')]
ci2n = UT.chopintervals(me2n, fn.txtname('findedges2.me2n.ci'), idcol='_id2')
ci2n = ci2n.rename(columns={'id':'sc1'})
ci2n['direction'] = '-'
LOG.debug('fixing {0} cip chrom-wise'.format(len(ci2p)))
me2pr = self.process_mp(ci2p,me2p,fixedges_m)
LOG.debug('fixing {0} cin chrom-wise'.format(len(ci2n)))
me2nr = self.process_mp(ci2n,me2n,fixedges_m)
# concatenate
cols = ['chr','st','ed','name','sc1','strand','_id2']
me3 = PD.concat([me0[cols],me1r[cols],me2pr[cols],me2nr[cols]],ignore_index=True).sort_values(['chr','st','ed'])
me3 = me3.groupby(['chr','st','ed','strand']).first().reset_index()
# find consistent sj
UT.set_info(sj,me3)
# acceptor
aids = set(me3['a_id'].values).intersection(set(sj['a_id'].values))
dids = set(me3['d_id'].values).intersection(set(sj['d_id'].values))
sja = sj.set_index('a_id').ix[aids].reset_index()
dids = dids.intersection(set(sja['d_id'].values))
sjd = sja.set_index('d_id').ix[dids].reset_index()
#return sjd, me3
fn.write_txt(sjd, 'findedges2.sj')
fn.write_txt(me3, 'findedges2.me')
self.asm.sj = sjd
self.asm.me = me3
def process_mp(self, ci, me, func):
pr = self.params
bwname = self.fnobj.bwfile
args = []
for chrom in self.chroms(me):
mechr = me[me['chr']==chrom][['chr','st','ed','name','strand','_id2']].copy()
cichr = ci[ci['chr']==chrom].copy()
args.append((cichr,mechr,bwname,pr,chrom))
rslts = UT.process_mp(func, args, pr['np'])
# rslts = []
# if pr['np']==1:
# for i,arg in enumerate(args):
# tmp = func(*arg) #cutedges_m(arg)
# LOG.debug(' processing {3}: {0}/{1} {2}...'.format(i+1,len(args),len(tmp),arg[-1]))
# rslts.append(tmp)
# return PD.concat(rslts,ignore_index=True)
# try:
# p = multiprocessing.Pool(pr['np'])
# tmp = p.map(func, args)
# finally:
# LOG.debug(' closing pool')
# p.close()
# return PD.concat(tmp, ignore_index=True)
return PD.DataFrame(rslts, columns=['chr','st','ed','name','sc1','strand','_id2'])
def cutedges_m(ci,me,bwname,pr,chrom):
# input ci: chopped interval, me: exons, pr: params
# return cut exons
# convert ci => cicut [+side col]
edgedet = EdgeDetector(
bwname=bwname,
sigmath=pr['ed_sigma'],
minth=pr['ed_minth'],
covratio=pr['ed_covratio'],
winsize=pr['ed_window'],
covth=pr['ed_covth'],
gapth=pr['binth'],
gap=pr['gap'],
smwinsize=pr['ed_smwinsize'],
minintsize=pr['ed_minintsize'],
aggregateratio=pr['ed_aggratio'],
mimath=pr['ed_mimath'],
mimath2=pr['ed_mimath2'],
triggerth=pr['ed_triggerth'])
cicols = ['chr','st','ed','name','sc1']
def _gen():
with edgedet:
for chrom0,st0,ed0,name,cid in UT.izipcols(ci,cicols):
cuts = edgedet.cutboth(chrom0,st0,ed0)
# left
for chrom1,st1,ed1 in cuts[0]:
yield (chrom,st1,ed1,name,cid, 0) # 0: left
# right
for chrom1,st1,ed1 in cuts[1]:
yield (chrom,st1,ed1,name,cid, 1) # 1: right
# both
for chrom1,st1,ed1 in cuts[2]:
yield (chrom,st1,ed1,name,cid, 2) # 2: both
tmp = [c for c in _gen()]
#LOG.debug('cut ci {2}: {0}=>{1}'.format(len(ci),len(tmp),chrom))
df = PD.DataFrame(tmp, columns=['chr','st','ed','name','cid','side'])
# now put them together as exons
# eid (_id2 in me) <=> cid (in df): encoded in df[name]
df['eid'] = df['name'].astype(str).apply(lambda x:[int(y) for y in x.split(',')])
dff = UT.flattendf(df, 'eid')
# eid=>strand, eid=>name
e2strand = dict(UT.izipcols(me, ['_id2','strand']))
e2name = dict(UT.izipcols(me, ['_id2','name']))
e2st = dict(UT.izipcols(me, ['_id2','st']))
e2ed = dict(UT.izipcols(me, ['_id2','ed']))
dff['strand'] = [e2strand[x] for x in dff['eid']]
dff['ename'] = [e2name[x] for x in dff['eid']]
dff['est'] = [e2st[x] for x in dff['eid']]
dff['eed'] = [e2ed[x] for x in dff['eid']]
dff = dff.sort_values(['eid','cid'])
def _egen():
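# reassemble each exon from its cut chopped-intervals:
# walk the intervals left-to-right emitting left-side cuts (side 0) and stop at the first
# interval without a 'both' (side 2) piece; then walk right-to-left emitting right-side
# cuts (side 1); an exon whose single interval is side 2 is returned uncut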
for eid, gr in dff.groupby('eid',sort=False):
if len(gr)==1 and gr.iloc[0]['side']==2:# no cut
c,s,e,n,r,cid = gr.iloc[0][['chr','est','eed','ename','strand','cid']]
yield (c,s,e,n,cid,r,eid)
continue
cids = sorted(gr['cid'].unique())
gri = gr.set_index('cid')
cnt = 1
# left
for cid in cids:
gci = gri.ix[[cid]].set_index('side')
# side 0
try:
test = gci.ix[0]
s0 = gci.ix[[0]]
for c,s,e,n,r in s0[['chr','est','ed','ename','strand']].values:
n1 = '{0}*(l{1})'.format(n,cnt)
cnt += 1
yield (c,s,e,n1,cid,r,eid)
except:
pass
# if not side 2 break
try:
s2 = gci.ix[2]
if cid==cids[-1]: # last one all connected
c,s,e,n,r = s2[['chr','est','eed','ename','strand']].values
yield (c,s,e,n,-1,r,eid)
except:
break
# right
for cid in cids[::-1]:
gci = gri.ix[[cid]].set_index('side')
# side 1
try:
test = gci.ix[1]
s1 = gci.ix[[1]]
for c,s,e,n,r in s1[['chr','st','eed','ename','strand']].values:
n1 = '(r{1})*{0}'.format(n,cnt)
cnt += 1
yield (c,s,e,n1,cid,r,eid)
except:
pass
# if not side 2 break
try:
s2 = gci.ix[2]
except:
break
tmp2 = [e for e in _egen()]
#LOG.debug('cut ex {2}: {0}=>{1}'.format(len(me),len(tmp2),chrom))
return tmp2
# edf = PD.DataFrame(tmp2, columns=['chr','st','ed','name','sc1','strand','_id2'])
# return edf
def fixedges_m(ci,me,bwname,pr,chrom):
# input me: exons only need to be fixed in one direction (kind: s,e,f)
# startswith [ => '+', endswith ] => '-'
edgedet = EdgeDetector(bwname,
sigmath=pr['ed_sigma'],
minth=pr['ed_minth'],
covratio=pr['ed_covratio'],
winsize=pr['ed_window'],
covth=pr['ed_covth'],
gapth=pr['binth'],
gap=pr['gap'],
smwinsize=pr['ed_smwinsize'],
minintsize=pr['ed_minintsize'],
aggregateratio=pr['ed_aggratio'],
mimath=pr['ed_mimath'],
mimath2=pr['ed_mimath2'],
triggerth=pr['ed_triggerth'])
cicols = ['chr','st','ed','name','sc1','direction']
def _gen():
with edgedet:
for chrom0,st0,ed0,name,cid,d in UT.izipcols(ci,cicols):
cuts = edgedet.cutone(chrom0,st0,ed0,d)
# left
for chrom1,st1,ed1 in cuts[0]:
yield (chrom,st1,ed1,name,cid, 0, d) # 0: left
# right
for chrom1,st1,ed1 in cuts[1]:
yield (chrom,st1,ed1,name,cid, 1, d) # 1: right
# both
for chrom1,st1,ed1 in cuts[2]:
yield (chrom,st1,ed1,name,cid, 2, d) # 2: both
tmp = [c for c in _gen()]
#LOG.debug('cut ci2 {2}: {0}=>{1}'.format(len(ci),len(tmp),chrom))
df = PD.DataFrame(tmp, columns=['chr','st','ed','name','cid','side','direction'])
# now put them together as exons
# eid (_id2 in me) <=> cid (in df): encoded in df[name]
df['eid'] = df['name'].astype(str).apply(lambda x:[int(y) for y in x.split(',')])
dff = UT.flattendf(df, 'eid')
# eid=>strand, eid=>name
e2strand = dict(UT.izipcols(me, ['_id2','strand']))
e2name = dict(UT.izipcols(me, ['_id2','name']))
e2st = dict(UT.izipcols(me, ['_id2','st']))
e2ed = dict(UT.izipcols(me, ['_id2','ed']))
dff['strand'] = [e2strand[x] for x in dff['eid']]
dff['ename'] = [e2name[x] for x in dff['eid']]
dff['est'] = [e2st[x] for x in dff['eid']]
dff['eed'] = [e2ed[x] for x in dff['eid']]
dff = dff.sort_values(['eid','cid'])
def _egen():
for eid, gr in dff.groupby('eid',sort=False):
if len(gr)==1 and gr.iloc[0]['side']==2:# no cut
c,s,e,n,r,cid = gr.iloc[0][['chr','est','eed','ename','strand','cid']]
yield (c,s,e,n,cid,r,eid)
continue
cids = sorted(gr['cid'].unique())
gri = gr.set_index('cid')
cnt = 1
direction = gr.iloc[0]['direction']
# only do left or right depending on direction
if direction=='+':
# left
for cid in cids:
gci = gri.ix[[cid]].set_index('side')
# side 0
try:
test = gci.ix[0]
s0 = gci.ix[[0]]
for c,s,e,n,r in s0[['chr','est','ed','ename','strand']].values:
n1 = '{0}*(l{1})'.format(n,cnt)
cnt += 1
yield (c,s,e,n1,cid,r,eid)
except:
pass
# if not side 2 break
try:
s2 = gci.ix[2]
if cid==cids[-1]: # last one all connected
c,s,e,n,r = s2[['chr','est','eed','ename','strand']].values
yield (c,s,e,n,-1,r,eid)
except:
break
else:
# right
for cid in cids[::-1]:
gci = gri.ix[[cid]].set_index('side')
# side 1
try:
test = gci.ix[1]
s1 = gci.ix[[1]]
for c,s,e,n,r in s1[['chr','st','eed','ename','strand']].values:
n1 = '(r{1})*{0}'.format(n,cnt)
cnt += 1
yield (c,s,e,n1,cid,r,eid)
except:
pass
# if not side 2 break
try:
s2 = gci.ix[2]
if cid==cids[0]: # last one all connected
c,s,e,n,r = s2[['chr','est','eed','ename','strand']].values
yield (c,s,e,n,-1,r,eid)
except:
break
tmp2 = [e for e in _egen()]
#LOG.debug('cut ex {2}: {0}=>{1}'.format(len(me),len(tmp2),chrom))
return tmp2
# edf = PD.DataFrame(tmp2, columns=['chr','st','ed','name','sc1','strand','_id2'])
# return edf
class FINDEDGES(SUBASE):
"""Find edge exons. (Add * to the name when cut.)
Args:
me: exon DataFrame
Returns:
:me: exon dataframe with edge exons added
Related Parameters:
* mpth: mapped% th for detecting gene boundary, default:{mpth} (merging: {mpth_m})
* binth: default {binth} (merging: {binth_m})
* gap: fill this gap to distinguish gene boundary vs. exon (fill exon but not intergenic)
default {gap} (merging: {gap_m})
* edgesize: temporary edge exon size, default {edgesize}
TempFiles:
* fe.sjbb.bed.gz
* fe.sjbb-ovl.txt.gz
* fe.exons.bed.gz
"""
def call(self):
sjexdf = self.asm.me
fn = self.fnobj
pr = self.params
gap = pr['gap']
edgesize = pr['edgesize']
# coverage file
binfile = self.bw2bed(pr['binth'])
gapfile = self.fillgap(gap, binfile)
# write sjex bounded both
idx = (sjexdf['name'].str.startswith('['))&(sjexdf['name'].str.endswith(']'))
sjbb = sjexdf[idx]
sjbbname = fn.write_bed(sjbb, 'findedges.sjbb', ncols=6)
# bedtools intersect
ovlname = fn.txtname('findedges.sjbb-ovl')
bbg = BT.calc_ovlratio(sjbbname, gapfile, ovlname, nacol=6, nbcol=3)
# write internal exons (>pr['mpth'])
inexs = bbg[bbg['ovlratio']>=pr['mpth']]
# turn ones with ratio<pr['mpth'] into edges and write together with
# other edge exons
edexs1 = sjexdf[~idx]
edges = bbg[bbg['ovlratio']<pr['mpth']]
cols = GGB.BEDCOLS[:6] # ['chr','st','ed','name','sc1','strand']
cols0 = ['chr','st','ed','strand','ovlratio','name']
if len(edges)>0:
def _iter_edges():
for chrom,st,ed,strand,ratio,name in edges[cols0].values:
# name = [(strand)(counter)(kind)]
yield (chrom,st,st+edgesize,name[:-1]+'*',ratio,strand)
# name = [(strand)(counter)(kind)*
yield (chrom,ed-edgesize,ed,'*'+name[1:],ratio,strand)
# name = *(strand)(counter)(kind)]
edexs2 = PD.DataFrame([x for x in _iter_edges()], columns = cols)
edexs = PD.concat([edexs1[cols],edexs2[cols]], ignore_index=True)
else:
edexs = edexs1[cols]
edexs = edexs.sort_values(['chr','st','ed','strand','name'])
edexs = edexs.groupby(['chr','st','ed','strand'],sort=False).first().reset_index()
# return concat
exons = PD.concat([inexs[cols], edexs[cols]], ignore_index=True)
#return exons
fn.write_bed(exons, 'findedges.exons', ncols=6)
self.asm.me = exons
class FIXSTRAND(SUBASE):
"""Assign strand to unstranded elements using information from connected components.
Args:
sj: junction dataframe
me: exon dataframe
Returns:
:sj: junction dataframe with strand fixed
:me: exon dataframe with strand fixed
Related Parameters:
* useallconnected (bool): whether to use all connected components or just direct neighbors
default {useallconnected}
TempFiles:
* sj0.bed.gz
"""
def call(self):
sj = self.asm.sj
me = self.asm.me
fn = self.fnobj
pr = self.params
st = self.stats
useallconnected=pr['useallconnected']
# ureadth = pr['ureadth']
# mreadth = pr['mreadth']
# sj = sj[((sj['ureads']>ureadth)|(sj['mreads']>mreadth))].copy()
mg = GP.MEGraph2(sj,me) # connections
# fix unstranded exons
idx = me['strand']=='.'
tmp = me[idx]
if not useallconnected:
cnt = [Counter(me.ix[mg.ex_ex(x)]['strand'].values) for i,x in enumerate(tmp['_id'].values)]
else:
cnt = [Counter(me.ix[mg.connected(x)]['strand'].values) for i,x in enumerate(tmp['_id'].values)]
LOG.debug('#unstranded={0}'.format(N.sum(idx)))
st['FIXSTRAND.#unstranded_exons_before'] = N.sum(idx)
me.loc[idx,'strand'] = [x.most_common()[0][0] for x in cnt]
idx = me['strand']=='.'
LOG.debug('#unstranded_exons_after={0}'.format(N.sum(idx)))
st['FIXSTRAND.#unstranded_exons_after'] = N.sum(idx)
# make sure there's no missing strand col
me.loc[me['strand'].isnull(), 'strand'] = '.'
# fix unstranded junctions
idx = sj['strand']=='.'
tmp = sj[idx]
cnt = [Counter(mg.sj_ex(x,'strand')) for i,x in tmp.iterrows()]
LOG.debug('#unstranded junctions={0}'.format(N.sum(idx)))
st['FIXSTRAND.#unstranded_junctions_before'] = N.sum(idx)
sj.loc[idx,'strand'] = [x.most_common()[0][0] if len(x)>0 else '.' for x in cnt]
idx = sj['strand']=='.'
LOG.debug('#still unstranded={0}'.format(N.sum(idx)))
st['FIXSTRAND.#unstranded_junctions_after'] = N.sum(idx)
if N.sum(idx)>0:
LOG.warning('discarding {0} still unstranded junctions...'.format(N.sum(idx)))
sj = sj[~idx].copy()
#return sj, me
fn.write_bed(sj, 'fixstrand.sj', ncols=7)
self.asm.sj = sj
self.asm.me = me
def fixedge(posarr,exs,strand,gap,utr,ignorefirstdonors,covfactor):
def _gen():
NAME = {True:{'gap_too_large':'==',
'assertfail_2_donor':'=D',
'assertfail_2_bined':'=B',
'acceptor':'_a',
'donor':'=d]',
'opposite':'_o'},
False:{'gap_too_large':'==',
'assertfail_2_donor':'D=',
'assertfail_2_bined':'B=',
'acceptor':'a_',
'donor':'[d=',
'opposite':'o_'}}
flag = ((strand=='+')&(utr=='3pr')) or ((strand=='-')&(utr=='5pr'))
delta = 1 if flag else -1
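# flag==True when extending towards larger coordinates (3'UTR on '+' or 5'UTR on '-');
# posarr was sorted in that scan direction by _make_arr, and delta orients the donor-position check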
def _nposkind(i):
npos, kind, cov = posarr.ix[i][['pos','kind','cov']]
return npos, kind[1:], cov # remove initial one char which was for sorting
def _step1(i,dcp):# look for bined or donor
# (None(close) or newpos, reason(kind), newidx, coverage)
if (i+1==len(posarr)):
return None,'outofrange_1',i+1,0.
npos, kind, cov = _nposkind(i+1)
while((kind=='acceptor')|(kind=='binst')|((kind=='donor')&(delta*(dcp-npos)>0))):
i += 1
if (i+1==len(posarr)):
return None,'outofrange_1',i+1, 0.
npos, kind, cov = _nposkind(i+1)
#if kind=='binst': # this cannot be the case <== actually it can happen when a different gap is used
# return None,'assertfail_1_binst',i+1, cov
if kind=='opposite': # stop here
return npos, kind, i+1, cov
# the rest are bined and donor, prioritize donor
while(i+1<len(posarr)):
npos2, kind2, cov2 = _nposkind(i+1)
if npos2 != npos:
break
i += 1
#if kind2=='donor':
# kind = 'donor'
return npos, kind, i, cov # i+1 is next pos so return i
def _step2(i):# look for next interval
if (i+1==len(posarr)):
return None,'outofrange_2',i+1,0.
npos, kind, cov = _nposkind(i+1)
if kind in ['bined','donor']: # this cannot be the case
return None,'assertfail_2_'+kind,i+1,0
# the rest are binst, acceptor, or opposite
return npos, kind, i+1, cov
def _next(cpos,i0,donorcheckpos,cov1=None):
# cpos: current end of interval could be None
# i0: current index pos into posarr
# donorcheckpos: (for SE attachment) ignore donor within this
# cov1: current coverage, could be None
# cpos only updated at bined (end of interval) or at donor
npos1, kind1, i1, cov1n = _step1(i0,donorcheckpos) # step1 find ends
if cov1 is None:
cov1 = cov1n
if npos1 is None: # didn't find end=> close at current end
# (close?, cur_end_pos, reason, cur_idx, coverage)
return (True, cpos, kind1, i1, cov1)
if kind1=='bined': # found end of an interval
# but it could be an interval that we ignored, so check coverage
if cov1*covfactor <= cov1n: # update pos/ use '<= ' so that cov1=cov1n=0 is ok
cpos = npos1
# then find the start of the next interval
npos2, kind2, i2, cov2 = _step2(i1)
if npos2 is None:
if kind2=='outofrange_2': # no further intervals
return (True, cpos,'gap_too_large',i1, None) # close at current pos
return (True, cpos, kind2, i2, None) # assert fail
# kind2 must be binst or acceptor or opposite
if kind2=='binst':
if abs(npos2-cpos)<=gap: # keep moving
return (False, cpos, 'binst', i2, cov1)
return (True, cpos, 'gap_too_large', i1, None) # close
# kind2 must be acceptor or opposite, close
return (True, cpos, kind2, i1, None)
# must be donor or opposite, close
if kind1=='donor':
if cov1*covfactor <= cov1n: # update pos/ use '<= ' so that cov1=cov1n=0 is ok
cpos = npos1
return (True, cpos, kind1, i1, None)
# must be opposite
return (True, cpos, kind1, i1, None)
# ex: ['chr','st','ed','name',...]
if flag:
tgt,tgt2 = 1,2 # st, ed
else:
tgt,tgt2 = 2,1 # ed, st
for ex in exs.values:
ex = list(ex)
pos = ex[tgt]
if ignorefirstdonors:
donorcheckpos = ex[tgt2]
else:
donorcheckpos = ex[tgt]
tmp = posarr[posarr['pos']==pos]['idx']
if len(tmp)==0:
continue
i0 = tmp.iloc[-1]
#LOG.debug('======= initial pos={0}'.format(pos))
close,npos,kind,i0,cov = _next(pos,i0,donorcheckpos,None)
while(not close):
close,npos,kind,i0,cov = _next(npos,i0,donorcheckpos,cov)
if (npos is not None) and (npos!=pos): # yield if there's end
if flag:
ex[3] = ex[3]+NAME[flag][kind]
else:
ex[3] = NAME[flag][kind]+ex[3]
ex[tgt2] = npos
if kind.startswith('assert'):
LOG.debug(' error:{0}/{1}/{2}/{3}/{4}'.format(UT.exid(ex), kind, npos, i0,ex[3]))
LOG.debug(posarr[i0-3:i0+3])
yield ex
else:
#if printerr:
LOG.debug(' error:{0}/{1}/{2}/{3}/{4}'.format(UT.exid(ex), kind, npos, i0,ex[3]))
if not kind.startswith('outofrange'):
LOG.debug(posarr[i0-3:i0+3])
rslt = [x for x in _gen()]
return rslt
class EDGEFIXER(SUBASE):
"""Fix edge exons by extending.
Args:
me: exon dataframe
Returns:
:me: exon dataframe with edge exons fixed
:edgefixer: this instance for reuse later
Related Parameters:
* gap3: for 3' UTR extension, default {gap3} (merging: {gap3_m})
* gap5: for 5' UTR extension, default {gap5} (merging: {gap5_m})
* covfactor: for gap filling: if coverage of the next interval is < covfactor*current cov
default {covfactor}
TempFiles:
* fixed5pr.bed.gz
* fixed3pr.bed.gz
* edgefixer.me.bed.gz
"""
# [TODO]
# - how to distinguish gaps in real 3'UTRs vs. gaps between genes or within exons?
# => use coverage information?
# => EDGEFIXER2 does this
def call(self):
me = self.asm.me[GGB.BEDCOLS[:6]] # part of the code assumes standard BED order
fn = self.fnobj
pr = self.params
st = self.stats
gap3 = pr['gap3']
gap5 = pr['gap5']
override = pr['override']
covfactor = pr['covfactor']
# make interval bed with strand info
self.bindf = bindf = self.make_bindf(me) #sname, exons, binth=binth, override=override)
# 3prime
fname = fn.bedname('edgefixer.fixed3pr')
if override or (not os.path.exists(fname)):
LOG.debug(' fixing 3pr exons...')
fixed3pr = self.process_edges(me, bindf, utr='3pr',gap=gap3,covfactor=covfactor)
GGB.write_bed(fixed3pr, fname, ncols=6)
else:
LOG.debug(' reading cached fixed3pr ...')
fixed3pr = GGB.read_bed(fname)
# find reconnected and remove the other side
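# edge exons that got extended all the way to a donor (name decorated with '=d]' or '[d=')
# have effectively reconnected to their partner; drop the original counterpart exon whose
# boundary matches, so the pair is not duplicated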
def _remove(fixed, me):
tmp0 = fixed[fixed['name'].str.endswith('*=d]')]
tmp1 = fixed[fixed['name'].str.startswith('[d=*')]
remove0 = dict([('*'+x[1:-4]+']',p) for x,p in UT.izipcols(tmp0,['name','ed'])])
remove1 = dict([('['+x[4:-1]+'*',p) for x,p in UT.izipcols(tmp1,['name','st'])])
LOG.debug(' removing {0}/{1}: me len={2}'.format(len(remove0),len(remove1),len(me)))
me0a = me[me['name'].isin(remove0.keys())]
me1a = me[me['name'].isin(remove1.keys())]
LOG.debug(' me0a({0}), me1a({1})'.format(len(me0a),len(me1a)))
me0b = me0a[me0a['ed']==[remove0[x] for x in me0a['name']]]
me1b = me1a[me1a['st']==[remove1[x] for x in me1a['name']]]
LOG.debug(' me0b({0}), me1b({1})'.format(len(me0b),len(me1b)))
idx0 = list(me0b.index.values)+list(me1b.index.values)
idx1 = me.index.isin(idx0)
LOG.debug(' idx0({0}), idx1({1}))'.format(len(idx0),N.sum(idx1)))
me1 = me[~idx1]
LOG.debug(' after removing: me len={0} diff={1}'.format(len(me1), len(me)-len(me1)))
return me1
me = _remove(fixed3pr, me)
# 5prime
fname = fn.bedname('edgefixer.fixed5pr')
if override or (not os.path.exists(fname)):
LOG.debug(' fixing 5pr exons...')
fixed5pr = self.process_edges(me, bindf,utr='5pr',gap=gap5,covfactor=covfactor)
GGB.write_bed(fixed5pr, fname, ncols=6)
else:
LOG.debug(' reading cached fixed5pr ...')
fixed5pr = GGB.read_bed(fname)
me = _remove(fixed5pr, me)
UT.set_ptyp(me)
inexs = me[me['ptyp']=='i']
UT.set_ptyp(fixed3pr)
UT.set_ptyp(fixed5pr)
cols = GGB.BEDCOLS[:6]+['ptyp']
nexons = PD.concat([inexs[cols], fixed3pr[cols], fixed5pr[cols]], ignore_index=True)
self.nexons = nexons = nexons.groupby(['chr','st','ed','strand','ptyp']).last().reset_index()
#return nexons
fn.write_bed(nexons, 'edgefixer.me', ncols=6)
self.asm.me = nexons
self.asm.edgefixer = self
def fixSEedge(self, me, targets, utr):
pr = self.params
if utr=='3pr':
gap = pr['gap3']
else:
gap = pr['gap5']
return self.process_edges_subset(me, self.bindf, targets, utr, gap=gap,covfactor=pr['covfactor'])
def make_bindf(self, me):
#sname, exons, binth=0, override=False
fn = self.fnobj
pr = self.params
override = pr['override']
# assign strand using exons
bfile = fn.txtname('edgefixer.bindf')
if (not os.path.exists(bfile)) or override:
binfile = self.bw2bed(pr['binth'])
if pr['binstrand']=='.':
efile = fn.write_bed(me, 'edgefixer.exons', ncols=6)
ofile = fn.txtname('edgefixer.bindf-exons')
ofile = BT.bedtoolintersect(binfile,efile,ofile,wao=True)
cols = ['chr','st','ed','ex_chr','ex_st','ex_ed','name','sc1','strand','ovl']
df = UT.read_pandas(ofile,names=cols)
tg = df.groupby(['chr','st','ed'])
t2 = tg.first() # choose first, then fix intervals with multiple elements
t2s = tg.size()
tidx = t2s[t2s>1].index # one with more than 1 overlapping element
tmp = df.set_index(['chr','st','ed']).ix[tidx]
cnt = tmp.groupby(tmp.index)['strand'].apply(lambda x: len(set(x)))
sidx = cnt[cnt>1].index # more than one strand
if len(sidx)>0:
t2.ix[sidx,'strand'] = '.' # multiple strands over this interval => set to '.'
t3 = t2.reset_index()[['chr','st','ed','name','ovl','strand']]
t4 = CC.calc_cov_mp(t3, fn.bwfile, bfile, pr['np'])
# clean up
os.unlink(efile)
os.unlink(ofile)
return t4
else:
df = GGB.read_bed(binfile) # chr,st,ed
df['strand'] = pr['binstrand']
UT.save_tsv_nidx_whead(df, bfile)
return df
else:
return UT.read_pandas(bfile)
# subtract internal exons ...=> wasn't a good idea
# inexfile = os.path.join(MD.SJEXDIR, sname+'.sj.inex.bed.gz')
# binfile2 = os.path.join(MD.SJEXDIR, sname+'.bindf.bed')
# if (not os.path.exists(binfile2+'.gz')) or override:
# BT.bedtoolsubtract(binfile, inexfile, binfile2)
# return GGB.read_bed(binfile2+'.gz')
def _make_arr(self, chrom, strand, bindf, exons, utr='3pr'):
binchr = bindf[(bindf['chr']==chrom)&(bindf['strand'].isin([strand,'.']))]
exchr = exons[(exons['chr']==chrom)&(exons['strand']==strand)]
#exchr2 = exons[(exons['chr']==chrom)&(exons['strand']!=strand)] # opposite strand
lex = exchr[exchr['name'].str.startswith('[')]
rex = exchr[exchr['name'].str.endswith(']')]
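# lex: exons whose left end is junction-bounded (name starts with '['), rex: right-bounded
# (name ends with ']'); their bounded ends enter posarr as acceptor/donor markers, with the
# roles swapped depending on the scan direction chosen below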
def _todf(df,tgt,name):
tmp = PD.DataFrame(df[tgt].values, columns=['pos'])
tmp['kind'] = name
if 'cov' in df.columns:
tmp['cov'] = df['cov'].values
else:
tmp['cov'] = 0.
return tmp
if ((strand=='+')&(utr=='3pr')) or ((strand=='-')&(utr=='5pr')) :
posarr = PD.concat([_todf(binchr,'st','2binst'), # going right direction
_todf(binchr,'ed','2bined'),
# make sure acceptor/donor comes before binst/ed (initial 1&2)
_todf(lex,'st','1acceptor'),
# 5pr donor regard as acceptor for the purpose of edge extension
_todf(rex,'ed','1donor'),
#_todf(exchr2,'st','opposite')
],
ignore_index=True).sort_values(['pos','kind'])
eidx = (exchr['name'].str.startswith('['))&(~exchr['name'].str.endswith(']'))
else:# (-,3') or (+,5')
posarr = PD.concat([_todf(binchr,'st','1bined'), # going left direction
_todf(binchr,'ed','1binst'),
# since going opposite direction acceptor/donor after binst/ed
_todf(lex,'st','2donor'),
_todf(rex,'ed','2acceptor'),
#_todf(exchr2,'ed','opposite')
],
ignore_index=True).sort_values(['pos','kind'], ascending=False)
eidx = (~exchr['name'].str.startswith('['))&(exchr['name'].str.endswith(']'))
exs = exchr[eidx]
posarr.index = N.arange(len(posarr))
posarr['idx'] = posarr.index.values #N.arange(len(posarr))
return posarr, exs
def process_edges(self, exons, bindf, utr, gap=300, covfactor=0.2):
pr = self.params
LOG.debug(' preparing data...')
args = []
for chrom in self.chroms(exons): # exons['chr'].unique():
for strand in ['+','-','.']:
posarr, exs = self._make_arr(chrom,strand,bindf,exons,utr)
if len(exs)==0:
continue
args.append((posarr, exs, strand, gap, utr, False, covfactor))#,printerr))
rslts = UT.process_mp(fixedge, args, pr['np'])
# rslts = []
# if np==1:
# for arg in args:
# rslts += fixedge(arg)
# else:
# try:
# p = multiprocessing.Pool(np)
# tmp = p.map(fixedge, args)
# LOG.debug('done fixEdge calculation: np={0}'.format(np))
# finally:
# LOG.debug('closing pool')
# p.close()
# rslts = reduce(iadd, tmp)
return PD.DataFrame(rslts, columns=exons.columns)
def process_edges_subset(self, exons, bindf, targets, utr, gap=300, covfactor=0.2):
LOG.debug(' preparing data...')
args = []
for chrom in self.chroms(exons): #exons['chr'].unique():
for strand in ['+','-','.']:
idx = (targets['chr']==chrom)&(targets['strand']==strand)
if N.sum(idx)==0:
continue
posarr, exs = self._make_arr(chrom,strand,bindf,exons,utr)
exs = targets[idx]
args.append((posarr, exs, strand, gap, utr, True, covfactor))#,printerr))
rslts = UT.process_mp(fixedge, args, self.params['np'])
# rslts = []
# if np==1:
# for arg in args:
# rslts += fixedge(arg)
# else:
# try:
# p = multiprocessing.Pool(np)
# tmp = p.map(fixedge, args)
# LOG.debug('done fixEdge calculation: np={0}'.format(np))
# finally:
# LOG.debug('closing pool')
# p.close()
# rslts = reduce(iadd, tmp)
return PD.DataFrame(rslts, columns=targets.columns)
class FINDIRETS(SUBASE):
"""Find intron retentions.
Args:
sj: junction DataFrame
me: exon DataFrame
Returns:
:me: exons with irets
Related Parameters:
* iret_mpth: mapped% th for detecting intron retention,
default {iret_mpth} (merging: {iret_mpth_m})
* iret_covth: if intron cov smaller than this, then ignore
default {iret_covth} (merging: {iret_covth_m})
* iret_covratio: min cov ratio between an iret and average of surrounding exons
default {iret_covratio} (merging: {iret_covratio_m})
* binth: default {binth} (merging: {binth_m})
TempFiles:
* irets.bed.gz
* irets.exons.txt.gz
* irets.me.cov.txt.gz
* irets.me.bed.gz
* irets.me.covci.txt.gz
* irets.me.ci.txt.gz
* sj.iret.sub.bed.gz
* sj.iret.ci.txt.gz
* sj.iret.covci.txt.gz
"""
def call(self):
self.sj = sj = self.asm.sj
self.me = me = self.asm.me
fn = self.fnobj
pr = self.params
override = pr['override']
iretmpth = pr['iret_mpth']
binth = pr['binth']
dfname = fn.bedname('findirets.irets')
if override or (not os.path.exists(dfname)):
LOG.debug(' finding iret...')
irets = self.find_irets(sj, me, mpth=iretmpth, binth=binth)
else:
LOG.debug(' reading cached iret...')
irets = GGB.read_bed(dfname)
irets['ptyp'] = 'r'
cols = GGB.BEDCOLS[:6]+['ptyp']
if 'ptyp' not in me.columns:
UT.set_ptyp(me)
nexons = PD.concat([me[cols],irets[cols]], ignore_index=True)
self.nexons = nexons = nexons.groupby(['chr','st','ed','strand','ptyp']).first().reset_index()
#return nexons
fn.write_txt(nexons, 'findirets.exons', fm='h')
self.asm.me = nexons
def find_irets(self, sj, me, mpth=0.95, binth=0):
fn = self.fnobj
pr = self.params
st = self.stats
override = pr['override']
# coverage file
binfile = self.bw2bed(binth)
sjfile = self.sjfile()
cname = fn.txtname('findirets.sj.mp')
sjmp = BT.calc_ovlratio(
aname=sjfile,
bname=binfile,
tname=cname,
nacol=7,
nbcol=3
)
# match records between sjmp and mg.sj
sjmp['str_id'] = UT.calc_locus(sjmp)
sj['str_id'] = UT.calc_locus(sj)
sid2ovl = UT.df2dict(sjmp, 'str_id','ovlratio')
sj['ovlratio'] = [sid2ovl.get(x,N.nan) for x in sj['str_id']]
if '_id' not in sj.columns:
UT.set_ids(sj)
if '_id' not in me.columns:
UT.set_ids(me)
if ('st_id' not in sj.columns) or ('st_id' not in me.columns):
UT.set_pos_info(sj,me)
sj['st-1'] = sj['st']-1
sj2 = sj[sj['ovlratio']>=mpth].copy()
# calc sj cov 1. subtract me, 2. calc_cov_ovl
mefile = fn.write_bed(me, 'findirets.me', ncols=3)
sjname = fn.bedname('findirets.sj')
sjname = UT.write_pandas(sj2[['chr','st-1','ed','_id']], sjname, '')
cname = fn.bedname('findirets.sj.sub')
if override or (not os.path.exists(cname)):
cname = BT.bedtoolsubtract(sjname, mefile, cname)
try:
sub = GGB.read_bed(cname) # for sparse data like single cell this may be empty
except:
self.irets = irets = []
LOG.info('#irets candidates:{0}'.format(len(irets)))
st['FINDIRETS.#irets_sj'] = len(irets)
cols = GGB.BEDCOLS[:6] # ['chr','st','ed','name','sc1','strand']
LOG.warning('******************** NO IRET FOUND!***********************')
return PD.DataFrame(N.zeros((0,len(cols))),columns=cols) # return empty dataframe
# bed: idcol=name, moreover
# sj is reduced (some sj completely overlap with exon)
sj2 = sj2.set_index('_id').ix[sub['name'].values].reset_index()
ciname = fn.txtname('findirets.sj.ci')
if override or (not os.path.exists(ciname)):
ci = UT.chopintervals(sub, ciname, idcol='name')
else:
ci = UT.read_pandas(ciname, names=['chr','st','ed','name','id'])
covciname = fn.txtname('findirets.sj.covci')
if override or (not os.path.exists(covciname)):
covci = CC.calc_cov_mp(ci, fn.bwfile, covciname, np=pr['np'])
else:
covci = UT.read_pandas(covciname)
covci['len'] = covci['ed']-covci['st']
covci['val'] = covci['cov']*covci['len']
covci['sjid'] = covci['name'].apply(lambda x: [int(y) for y in x.split(',')])
cov = UT.flattendf(covci[['id','sjid','len','val']], 'sjid')
covg = cov.groupby('sjid')[['len','val']].sum().reset_index()
covg['cov'] = covg['val']/covg['len']
sj2cov = UT.df2dict(covg, 'sjid','cov')
sj2['cov'] = [sj2cov[x] for x in sj2['_id']]
sj2 = sj2[sj2['cov']>pr['iret_covth']]
# calc me cov
if override or not os.path.exists(fn.txtname('findirets.me.cov')):
LOG.debug('calculating ME cov...')
self.me = me = CC.calc_cov_ovl_mp(
srcname=me,
bwname=fn.bwfile,
dstname=fn.txtname('findirets.me.cov'),
override=override,
np=pr['np'],
covciname=fn.txtname('findirets.me.covci'),
ciname=fn.txtname('findirets.me.ci'),
)
else:
self.me = me = fn.read_txt('findirets.me.cov')
self.irets = irets = sj2
LOG.info('#irets candidates:{0}'.format(len(irets)))
st['FINDIRETS.#irets_sj'] = len(irets)
return self.process_mp(sj2, me, irets)
def process_mp(self, sj, me, irets):
fn = self.fnobj
pr = self.params
covratio = pr['iret_covratio']
covth = pr['iret_covth']
LOG.debug(' preparing data...')
args = []
for chrom in self.chroms(me):
mechr = me[me['chr']==chrom][['chr','st','ed','name','_id','st_id','ed_id','cov']].copy()
sjchr = sj[sj['chr']==chrom][['chr','st','ed','name','_id','st_id','ed_id']].copy()
irchr = irets[irets['chr']==chrom][['chr','ovlratio','strand','st_id','ed_id','cov']]
args.append((sjchr,mechr,irchr,chrom,covratio,covth))
rslts = UT.process_mp(findirets, args, pr['np'])
# rslts = []
# np = pr['np']
# if np==1:
# for i,arg in enumerate(args):
# tmp = findirets(arg)
# LOG.debug(' processing {3}: {0}/{1} {2}...'.format(i+1,len(args),len(tmp),arg[3]))
# rslts += tmp
# else:
# try:
# p = multiprocessing.Pool(np)
# tmp = p.map(findirets, args)
# finally:
# LOG.debug(' closing pool')
# p.close()
# rslts = reduce(iadd, tmp)
cols = GGB.BEDCOLS[:6] # ['chr','st','ed','name','sc1','strand']
self.stats['FINDIRETS.#irets_ex'] = len(rslts)
if len(rslts)>0:
df = PD.DataFrame(rslts, columns = cols)
dfname = fn.write_bed(df, 'findirets.irets', ncols=6)
LOG.info('{0} irets found'.format(len(df)))
return df
LOG.warning('******************** NO IRET FOUND!***********************')
return PD.DataFrame(N.zeros((0,len(cols))),columns=cols) # return empty dataframe
def findirets(sj,me,irets,chrom,covratio,covth):
mg = GP.MEGraph2(sj,me)
# turn irets junction into iret exons
def _iter_irets():
for chrom,ratio,strand,st_id,ed_id,cov in UT.izipcols(irets,['chr','ovlratio','strand','st_id','ed_id','cov']):
for st,namel,covl in mg.sj_leftex(st_id,flds=['st','name','cov']):
for ed,namer,covr in mg.sj_rightex(ed_id,flds=['ed','name','cov']):
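# keep the iret only if its coverage exceeds covratio times the mean of the two
# flanking exon coverages and also the absolute covth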
if ((2*cov > (covr+covl)*covratio)) & (cov>covth):
#if (cov > min(covr,covl)*covratio) & (cov>covth):
name = namel+'_iret_'+namer
yield (chrom,int(st),int(ed),name,ratio,strand)
cols = GGB.BEDCOLS[:6] # ['chr','st','ed','name','sc1','strand']
recs = [x for x in _iter_irets()]
#LOG.debug('{0}: len:{1}'.format(chrom,len(recs)))
return recs
class FINDSE(SUBASE):
"""Find single exons.
Args:
me: exon DataFrame
edgefixer: EdgeFixer instance
secovth: se coverage threshold
Returns:
:ae: all exons
:se: single exons
:me: multi-exons
Related Parameters:
* minsecovth: minimum single exon coverage (normalized to million alignments)
default {minsecovth} (merging: {minsecovth_m})
* secovth: default SE cov threshold if not using adaptive version
default {secovth} (merging: {secovth_m})
* se_gap: single exon gap fill, default {se_gap}
* se_binth: coverage threshold for SE finding, default {se_binth} (merging: {se_binth_m})
* se_sizeth: single exon size th, default {se_sizeth} (merging: {se_sizeth_m})
TempFiles:
* se.cov.tmp.txt.gz
* se.cov.all.txt.gz
* secov.txt.gz
* se.bed.gz
* exons.afterfindse.txt.gz
* me.exons.bed.gz
"""
def call(self):
me = self.asm.me
edgefixer = self.asm.edgefixer
secovth = self.asm.secovth
fn = self.fnobj
pr = self.params
st = self.stats
secovth = max(pr['minsecovth'], secovth)
st['FINDSE.secovth'] = secovth
gap = pr['se_gap']
binth = pr['se_binth']
sesizeth = pr['se_sizeth']
override = pr['override']
fname = fn.bedname('findse.se')
if override or (not os.path.exists(fname)):
LOG.debug(' finding SE...')
se0 = self.find_se(me, covth=secovth,gap=gap,binth=binth,sizeth=sesizeth)
else:
LOG.debug(' reading cached SE {0}...'.format(fname))
se0 = GGB.read_bed(fname)
if len(se0)==0:
#return ae, se, me2
fn.write_bed(me, 'assemble.exons0', ncols=6)
UT.set_ids(me)
LOG.info('write exons ...=> assemble.exons0.txt.gz')
fn.write_txt(me, 'assemble.exons0', fm='h')
self.asm.ae = me
self.asm.se = se0
self.asm.me = me
return
UT.set_ptyp(se0)
idx = se0['ptyp']=='s'
seme = se0[~idx] # 3' or 5' exons
# fix 3' exons
LOG.debug(' fixing 3pr extension 2nd phase ...')
seme2 = edgefixer.fixSEedge(me, seme, utr='3pr')
seme2 = edgefixer.fixSEedge(me, seme2, utr='5pr')
#seme2 = edgefixer.fixSEedge(me, seme2, utr='se')
se = se0[idx]
st['FINDSE.#se'] = len(se)
cols = GGB.BEDCOLS[:6]+['ptyp']
me2 = PD.concat([me[cols],seme2[cols]], ignore_index=True)
ae = PD.concat([me2[cols],se[cols]], ignore_index=True)
#return ae, se, me2
fn.write_bed(ae, 'assemble.exons0', ncols=6)
UT.set_ids(ae)
LOG.info('write exons ...=> assemble.exons0.txt.gz')
fn.write_txt(ae, 'assemble.exons0', fm='h')
self.asm.ae = ae
self.asm.se = se
self.asm.me = me2
def find_se(self, exons, covth=0.5,gap=50,binth=0,sizeth=200,override=False):
# from bin0gap50 subtract ME choose >200bp and cov>1
# coverage file
LOG.debug(' finding SE candidates...')
fn = self.fnobj
pr = self.params
st = self.stats
fname = fn.txtname('se.cov.tmp')
aname = fn.txtname('se.cov.all')
override = pr['override']
if (not override) and (os.path.exists(fname)):
LOG.debug(' reading cached SECOV {0}...'.format(fname))
secov = UT.read_pandas(fname)
secov['len'] = secov['ed']-secov['st']
secov = secov[secov['len']>sizeth]
elif os.path.exists(aname):
# use cov calculated at FINDSECOVTH
secov = UT.read_pandas(aname)
secov['len'] = secov['ed']-secov['st']
secov = secov[secov['len']>sizeth]
else:
# if not using FINDSECOV then just calculate len>sizeth
LOG.debug(' calculating SECOV...')
binfile = self.bw2bed(binth)
gapfile = self.fillgap(gap, binfile)
mefile = fn.write_bed(exons, 'findse.me.exons', ncols=3)
# subtract me from gapfilled
LOG.debug(' calculating coverage...')
cname = fn.bedname('findse.gap-sub-me') #os.path.join(MD.SJEXDIR,sname+'.gap-sub-me.bed.gz')
if override or (not os.path.exists(cname)):
BT.bedtoolsubtract(gapfile, mefile, cname)
df = GGB.read_bed(cname)
df['len'] = df['ed'] - df['st']
df = df[df['len']>sizeth]
#secov = BW.calc_cov(df, bwfile, fname) # ~30-40 sec
secov = CC.calc_cov_mp(df, fn.bwfile, fname, pr['np']) # here secovtmp is saved
os.unlink(mefile)
secov = secov[secov['cov']>pr['minsecovth']]
# use a lower threshold here so that non-SE elements (i.e. alternative 3',5' UTRs) are not lost,
# but processing every SE candidate would take too long, so restrict to a
# reasonable amount => minsecovth
# proper secovth-based SE selection happens later at SELECTSEME
fn.write_txt(secov, 'findse.secov', fm='h')
LOG.info('#candidate SE:{0}'.format(len(secov)))
st['FINDSE.#candidate_se'] = len(secov)
# see if additional filtering is necessary for intronic SE
# ==> covth 0.5 seems to be OK, ~4K overall
# long ones are 3'UTR ==> needs to be fixed
LOG.debug('fixing 3pr extension...')
cols = GGB.BEDCOLS[:6]
recs = [x for x in self._fix_se_ext(exons, secov)]
LOG.info('#candidate SE after _fix_se_ext:{0}'.format(len(recs)))
se = PD.DataFrame(recs,columns=cols)
st['FINDSE.#candidate_se_after_fix_se_ext'] = len(se)
fn.write_bed(se, 'findse.se', ncols=6)
return se
def _fix_se_ext(self, exons, secov):
# generator
# go through se and see if it is attached to 5' or 3'of me exon
# if so create extended exon
# otherwise just spit out itself
est = exons.set_index(['chr','st'])
eed = exons.set_index(['chr','ed'])
def _get(tgt,chrom,pos):
try:
test0 = tgt.ix[chrom]
test1 = test0.ix[pos] # if no match => except
return test0.ix[[pos]] # always return DataFrame
except: # constructing DataFrame for Null cases seems to take a lot of time
return None
LOG.info(' processing {0} SE candidates'.format(len(secov)))
for i,(chrom,st,ed,cov) in enumerate(UT.izipcols(secov, ['chr','st','ed','cov'])):
le = _get(eed,chrom,st) # for 3utr + strand find exons on left eed==st
re = _get(est,chrom,ed) # for 3utr - strand find exons on right est==ed
if (le is not None) and (re is not None):
pass
# this should have been detected in iret
# for nal,srl,e_st in le[['name','strand','st']].values:
# for nar,srr,e_ed in re[['name','strand','ed']].values:
# if srl==srr:
# name = nal+'|SE{0}|'.format(i)+nar
# yield (chrom,e_st,e_ed,name,cov,sr)
elif le is not None:# chr, st, ed, name, sc1, strand
for nal,srl,e_st in le[['name','strand','st']].values:
name = nal+'|SE{0}'.format(i)
yield (chrom,e_st,ed,name,cov,srl)
elif re is not None:
for nar,srr,e_ed in re[['name','strand','ed']].values:
name = 'SE{0}|'.format(i)+nar
yield (chrom,st,e_ed,name,cov,srr)
else:
yield (chrom,st,ed,'SE{0}'.format(i),cov,'.')
def _fix_se_3prext(self, exons, secov):
# generator
# go through se and see if it is attached to 3'of me exon
# if so create extended 3pr end exon
# otherwise just spit out itself
est = exons.set_index(['chr','strand','st'])
eed = exons.set_index(['chr','strand','ed'])
def _get(tgt,chrom,strand,pos):
try:
return tgt.ix[chrom].ix[strand].ix[pos]
except:
return None
for chrom,st,ed,cov in secov[['chr','st','ed','cov']].values:
le = _get(eed,chrom,'+',st) # for 3utr + strand find exons on left eed==st
re = _get(est,chrom,'-',ed) # for 3utr - strand find exons on right est==ed
if (le is not None) or (re is not None):
if le is not None:# chr, st, ed, name, sc1, strand
if len(le.shape)==1:
name = le['name']+'|SE'
yield (chrom,le['st'],ed,name,cov,'+')
else:
for j,e in le.iterrows():
name = e['name']+'|SE'
yield (chrom,e['st'],ed,name,cov,'+')
if re is not None:
if len(re.shape)==1:
name = 'SE|'+re['name']
yield (chrom,st,re['ed'],name,cov,'-')
else:
for j,e in re.iterrows():
name = 'SE|'+e['name']
yield (chrom,st,e['ed'],name,cov,'-')
else:
yield (chrom,st,ed,'SE',cov,'.')
class FIND53IR(SUBASE):
"""Find 5',3' exon (cut), intron retentions and single exons.
Args:
me: exon DataFrame
Returns:
:ae: exons with 5',3' cuts, irets, and single exons.
Related Parameters:
* minsecovth: minimum single exon coverage (normalized to million alignments)
default {minsecovth} (merging: {minsecovth_m})
* secovth: default SE cov threshold if not using adaptive version
default {secovth} (merging: {secovth_m})
* se_gap: single exon gap fill, default {se_gap}
* se_binth: coverage threshold for SE finding, default {se_binth} (merging: {se_binth_m})
* se_sizeth: single exon size th, default {se_sizeth} (merging: {se_sizeth_m})
* find53ir_covratio: cov ratio threshold for FIND53IR,
default {find53ir_covratio}, (merging: {find53ir_covratio_m})
* find53ir_covth: cov threshold for FIND53IR
default {find53ir_covth}, (merging: {find53ir_covth_m})
TempFiles:
* se.cov.tmp.txt.gz
* se.cov.all.txt.gz
* me.exons.bed.gz
* bw*.bed.gz
* gap-sub-me.bed.gz
* assemble.exons0.txt.gz
* assemble.exons0.bed.gz
"""
def call(self):
me = self.asm.me
fn = self.fnobj
pr = self.params
st = self.stats
secovth = max(pr['minsecovth'], pr['secovth'])
st['FINDSE.secovth'] = secovth
override = pr['override']
fname = fn.bedname('find53ir')
if override or (not os.path.exists(fname)):
LOG.debug(' finding 53IR...')
ae = self.find(me, secovth)#,gap=gap,binth=binth,sizeth=sesizeth)
else:
LOG.debug(' reading cached 53IR {0}...'.format(fname))
ae = GGB.read_bed(fname)
#return ae
self.asm.ae = ae
fn.write_bed(ae, 'assemble.exons0', ncols=6)
UT.set_ids(ae)
LOG.info('write exons ...=> assemble.exons0.txt.gz')
fn.write_txt(ae, 'assemble.exons0', fm='h')
def find(self, me, secovth):
# TODO fixedge dse as well?
fn = self.fnobj
pr = self.params
st = self.stats
gap = pr['se_gap']
sizeth = pr['se_sizeth']
override = pr['override']
np = pr['np']
# calc SE candidates (subtract ME) and calc cov <= [TODO] better to use parameters
# separate from the real SE extraction part
fname = fn.txtname('se.cov.tmp')
aname = fn.txtname('se.cov.all')
if (not override) and os.path.exists(fname):
LOG.info(' reading cached SECOV {0}...'.format(fname))
secov = UT.read_pandas(fname)
secov['len'] = secov['ed']-secov['st']
secov = secov[secov['len']>sizeth]
elif (not override) and os.path.exists(aname):
LOG.info(' reading cached SECOV {0}...'.format(aname))
# use cov calculated at FINDSECOVTH
secov = UT.read_pandas(aname)
secov['len'] = secov['ed']-secov['st']
secov = secov[secov['len']>sizeth]
else:
# if not using FINDSECOV then just calculate len>sizeth
LOG.info(' calculating SECOV...')
binfile = self.bw2bed(pr['se_binth'])
gapfile = self.fillgap(gap, binfile)
mefile = fn.write_bed(me, 'find53ir.me.exons', ncols=3)
LOG.debug(' calculating coverage...')
cname = fn.bedname('find53ir.gap-sub-me')
if override or (not os.path.exists(cname)):
BT.bedtoolsubtract(gapfile, mefile, cname)
df = GGB.read_bed(cname)
df['len'] = df['ed'] - df['st']
df = df[df['len']>sizeth]
secov = CC.calc_cov_mp(df, fn.bwfile, fname, np) # here secovtmp is saved
os.unlink(mefile)
# calc ME cov
if override or not os.path.exists(fn.txtname('find53ir.cov')):
LOG.debug('calculating ME cov...')
UT.set_ids(me)
me = CC.calc_cov_ovl_mp(
srcname=me,
bwname=fn.bwfile,
dstname=fn.txtname('find53ir.cov'),
np=np,
override=override,
covciname=fn.txtname('find53ir.covci'),
ciname=fn.txtname('find53ir.ci'))
else:
me = fn.read_txt('find53ir.cov')
# write BED files and find attaching ME/SE
secov['st-1'] = secov['st']-1
secov['ed+1'] = secov['ed']+1
secov['name'] = ['SE{0}'.format(x) for x in N.arange(len(secov))]
secov['_id2'] = N.arange(len(secov))
secov['strand'] = '.'
secols = ['chr','st-1','ed+1','cov','_id2','name','st','ed']
mecols = ['chr','st','ed','name','_id','cov','strand']
# somehow ['chr','st','ed','name','_id','strand','cov'] this order segfaults
# when intersected with others
mecols2 = ['echr','est','eed','ename','eid','ecov','strand']
a = UT.write_pandas(secov[secols],fn.bedname('find53ir.se'),'')
b = UT.write_pandas(me[mecols],fn.bedname('find53ir.me'),'')
c = fn.txtname('find53ir.ovl')
c = BT.bedtoolintersect(a,b,c,wao=True)
cols = secols+mecols2+['ovl']
d = UT.read_pandas(c, names=cols)
d['attachleft'] = d['ed+1']-1==d['est']
d['attachright'] = d['st-1']+1==d['eed']
d['bound'] = (d['ename'].str.startswith('['))&(d['ename'].str.endswith(']'))
# SE
dse = d[d['echr']=='.'].copy() # no overlap
dse = dse[dse['cov']>secovth] #
dse['sc1'] = dse['cov'] # for writing BED
# ME
dme = d[(d['attachleft']&d['bound'])|(d['attachright']&d['bound'])].copy()
dme['covratio'] = dme['cov'].astype(float)/dme['ecov'].astype(float)
dme = dme[dme['covratio']>pr['find53ir_covratio']] # and substantial portion
# attachleft sid, attachright sid, iret side, etc.
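# al/ar: SE candidates touching a bounded ME exon on one side only (they become 5'/3'
# extensions of that exon); ir: touching bounded ME exons on both sides with the same
# strand (intron-retention candidates)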
def _lri(strand):
al0p = set(dme[(dme['attachleft'])&(dme['strand']==strand)]['_id2'].values)
ar0p = set(dme[(dme['attachright'])&(dme['strand']==strand)]['_id2'].values)
irp = al0p.intersection(ar0p)
alp = al0p.difference(ar0p)
arp = ar0p.difference(al0p)
return alp,arp,irp
alp,arp,irp = _lri('+')
aln,arn,irn = _lri('-')
ir = irp.union(irn)
al = alp.union(aln)
ar = arp.union(arn)
LOG.debug('#al({0}) #ar({1}) #iret({2})'.format(len(al),len(ar),len(ir)))
# fix edges, no chop necessary but need copy
sei = secov.set_index('_id2')
# cicols: ['chr','st','ed','name','sc1', 'direction']
# mecols: ['chr','st','ed','name','strand','_id2']
# ci.name == str(me._id2)
def _calc(id2s,direction,func):
if len(id2s)==0:
return None
me = sei.ix[id2s].reset_index()[['chr','st','ed','name','strand','_id2']].copy()
ci = me[['chr','st','ed']].copy()
ci['name'] = me['_id2'].astype(str)
ci['sc1'] = me['_id2']
ci['direction'] = direction
return self.process_mp(ci,me,func)
alr = _calc(al,'-',fixedges_m)
arr = _calc(ar,'+',fixedges_m)
irr = _calc(ir,'.',cutedges_m)
# returned ['chr','st','ed','name','sc1','strand','_id2']
# attach exons
dmi = dme.set_index('_id2')
i2c = UT.df2dict(secov, '_id2','cov')
def _makedics(id2s, boolcol):
t = dmi.ix[id2s].reset_index()
t = t[t[boolcol]]
tg = t.groupby('_id2')['eid'].apply(lambda x: list(x)).reset_index()
s2eids = UT.df2dict(tg, '_id2','eid')
tg2 = t.groupby('eid').first()
e2row = dict(zip(tg2.index.values, UT.izipcols(tg2,['est','eed','ename','strand'])))
return s2eids, e2row
def _algen():
if alr is None:
return
s2eids,e2row = _makedics(al,'attachleft')
for c,s,e,n,i2 in UT.izipcols(alr,['chr','st','ed','name','_id2']):
# SE|[mel] # (chr,sst,eed,sn|en,i2,estrand)
cov = i2c[i2]
for eid in s2eids[i2]: # erow: st,ed,name,strand
erow = e2row[eid]
name = n+'|'+erow[2]
yield (c,s,erow[1],name,cov,erow[3])
def _argen():
if arr is None:
return
s2eids,e2row = _makedics(ar,'attachright')
for c,s,e,n,i2 in UT.izipcols(arr,['chr','st','ed','name','_id2']):
# [mer]|SE, (chr,est,sed,name,i2,estrand)
cov = i2c[i2]
for eid in s2eids[i2]: # erow: st,ed,name,strand
erow = e2row[eid]
name = erow[2]+'|'+n
yield (c,erow[0],e,name,cov,erow[3])
def _irgen():
if irr is None:
return
s2le,le2row = _makedics(ir,'attachleft')
s2re,re2row = _makedics(ir,'attachright')
for c,s,e,n,i2 in UT.izipcols(irr,['chr','st','ed','name','_id2']):
# [mer]|SE|[mel]
cov = i2c[i2]
for eidl in s2le[i2]:
erowl = le2row[eidl]
for eidr in s2re[i2]:
erowr = re2row[eidr]
if erowl[3]==erowr[3]:
name = erowr[2]+'|'+n+'|'+erowl[2]
yield (c,erowr[0],erowl[1],name,cov,erowl[3])
alrecs = [x for x in _algen()]
arrecs = [x for x in _argen()]
irrecs = [x for x in _irgen()]
cols = ['chr','st','ed','name','sc1','strand'] # se cov in 'sc1'
        medf = PD.DataFrame(alrecs+arrecs+irrecs, columns=cols)  # api: pandas.DataFrame
from api.common.JQSDK_Connect import JQSDK_Connect
from api.common.Saver import Saver
from jqdatasdk import *
from request.JoinQuant.All_Securities import All_Securities
from request.JoinQuant.Base_Object import Base_Object
import pandas as pd
from api.common.Query import Query
class Daily_Securities_Price(Base_Object):
def __init__(self):
self.sql_table_name = "jq_daily_securities_price"
def save_by_query_date(self,securities_list=None,start_date='',end_date=''):
if securities_list is None:
all_Securities = All_Securities()
df = all_Securities.query_by_last()
securities_list = list(df['code'])
fields = ['open', 'close', 'low', 'high', 'volume', 'money', 'factor', 'high_limit','low_limit', 'avg', 'pre_close', 'paused', 'open_interest']
df_price = get_price(securities_list, start_date=start_date, end_date=end_date, frequency='1d',fields = fields, \
skip_paused=False, fq='post', panel=False)
df_price = df_price.rename({"time":"date"},axis=1)
df_price['date'] = df_price['date'].astype(str)
Saver.save_to_mysql(df_price,self.sql_table_name)
def get_pre_adjust_daily_price(self,start_date='',end_date='',type='stock'):
all_Securities = All_Securities()
_df = all_Securities.query_by_condition({"date":end_date,"type":type})
securities_list = list(_df['code'])
df_enddate_price = self.query_by_condition(condition={"date":end_date,"code":securities_list})
df_enddate_price = df_enddate_price.rename({"factor":"last_factor"},axis=1)
df_all_price = self.query_by_condition(condition={"date": (start_date,end_date),"code":securities_list})
        df_merged = pd.merge(df_all_price, df_enddate_price[['code','last_factor']], how='left', on=['code'])  # api: pandas.merge
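        # Hedged sketch, not part of the original method: with fq='post' prices,
        # dividing each price by the adjustment factor on end_date ('last_factor')
        # yields prices pre-adjusted relative to end_date. The set of price columns
        # rescaled here and the final return are assumptions.
        price_cols = ['open', 'close', 'low', 'high', 'avg', 'pre_close']
        for col in price_cols:
            df_merged[col] = df_merged[col] / df_merged['last_factor']
        return df_merged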
"""
dlc2kinematics
© <NAME>
https://github.com/AdaptiveMotorControlLab/dlc2kinematics/
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import os
from scipy.signal import savgol_filter
from sklearn.decomposition import PCA
from dlc2kinematics.utils import auxiliaryfunctions
def load_joint_angles(data):
"""
Loads the joint angles which are computed by dlc2kinematics.compute_joint_angles() and stored as pandas dataframe
Parameters
----------
data: string
Full path of the pandas array(.h5) file as a string.
Outputs
-------
df: dataframe
Example
-------
Linux/MacOs
>>> joint_angle = dlc2kinematics.load_joint_angles('/home/data/joint_angle.h5')
Windows
>>> joint_angle = dlc2kinematics.load_joint_angles('C:\\yourusername\\rig-95\\joint_angle.h5')
"""
joint_angle = pd.read_hdf(data)
return joint_angle
def compute_joint_angles(
df,
joints_dict,
save=True,
destfolder=None,
output_filename=None,
dropnan=False,
smooth=False,
filter_window=3,
order=1,
pcutoff=0.4,
):
"""
Computes the joint angles for the bodyparts.
Parameters
----------
    df: Pandas multiindex dataframe which is the output of DeepLabCut. Assumes the dataframe is already smoothed. If not, adjust the filter_window and order to smooth the dataframe.
joints_dict: Dictionary
Keys of the dictionary specifies the joint angle and the corresponding values specify the bodyparts. e.g.
joint_dict = {'R-Elbow': ['R_shoulder', 'Right_elbow', 'Right_wrist']}
save: boolean
Optional. Saves the joint angles as a pandas dataframe if set to True.
destfolder: string
        Optional. Saves the joint angles in the specified destination folder. If it is set to None, the joint angles are saved in the current working directory.
output_filename: string
        Optional. Name of the output file. If it is set to None, the file is saved as joint_angles_<scorer_name>.h5, where <scorer_name> is the name of the scorer in the input df.
dropnan: boolean
Optional. If you want to drop any NaN values, this is useful for some downstream analysis (like PCA).
smooth: boolean
        Optional. If you want to smooth the data with a Savitzky-Golay (savgol) filter, you can set this to true, and then also add filter_window and order.
filter_window: int
Optional. If smooth=True, window is set here, which needs to be a positive odd integer.
order: int
Optional. Only used if the optional argument `smooth` is set to True. Order of the polynomial to fit the data. The order must be less than the filter_window
pcutoff: float
        Optional. Likelihood cutoff. All bodyparts with a likelihood below `pcutoff` (e.g. < 0.4) are not used to compute the joint angles. It is only useful when computing joint angles from 2d data.
Outputs
-------
joint_angles: dataframe of joint angles
Example
-------
>>> joint_angles = dlc2kinematics.compute_joint_angles(df,joint_dict)
"""
flag, _ = auxiliaryfunctions.check_2d_or_3d(df)
is_multianimal = "individuals" in df.columns.names
if flag == "2d" and pcutoff:
def filter_low_prob(cols, prob):
mask = cols.iloc[:, 2] < prob
cols.loc[mask, :2] = np.nan
return cols
df = df.groupby("bodyparts", axis=1).apply(filter_low_prob, prob=pcutoff)
angle_names = list(joints_dict)
if not destfolder:
destfolder = os.getcwd()
if not output_filename:
output_filename = (
"joint_angles_" + df.columns.get_level_values("scorer").unique()[0]
)
filepath = os.path.join(destfolder, output_filename + ".h5")
if os.path.isfile(filepath):
print("File already present. Reading %s" % output_filename)
angles = pd.read_hdf(filepath)
if not all(angles.columns.isin(angle_names)):
raise IOError(
"The existing file has different joint angles than specified "
"in the dictionary joints_dict. "
"Please delete the existing file and try again!"
)
else:
angles = angles.loc[:, angle_names]
else:
angles = dict()
for joint, bpts in joints_dict.items():
print(f"Computing joint angles for {joint}")
mask = df.columns.get_level_values("bodyparts").isin(bpts)
temp = df.loc[:, mask]
if is_multianimal:
for animal, frame in temp.groupby(level="individuals", axis=1):
angles[f"{joint}_{animal}"] = frame.apply(
auxiliaryfunctions.jointangle_calc, axis=1
).values
else:
angles[joint] = temp.apply(
auxiliaryfunctions.jointangle_calc, axis=1
).values
angles = pd.DataFrame.from_dict(angles)
if dropnan:
angles.dropna(inplace=True)
if smooth:
angles[:] = savgol_filter(angles, filter_window, order, deriv=0, axis=0)
if save:
print(f"Saving the joint angles as a pandas array in {destfolder}")
angles.to_hdf(
filepath,
"df_with_missing",
format="table",
mode="w",
)
return angles
def compute_joint_velocity(
joint_angle,
filter_window=3,
order=1,
save=True,
destfolder=None,
output_filename=None,
dropnan=False,
):
"""
Computes the joint angular velocities.
Parameters
----------
joint_angle: Pandas dataframe of joint angles. You can also pass the full path of joint angle filename as a string.
filter_window: int
Optional. The length of filter window which needs to be a positive odd integer
order: int
Optional. Order of the polynomial to fit the data. The order must be less than the filter_window
save: boolean
Optional. Saves the joint velocity as a pandas dataframe if set to True.
destfolder: string
        Optional. Saves the joint velocity in the specified destination folder. If it is set to None, the joint velocities are saved in the current working directory.
output_filename: string
Optional. Name of the output file. If it is set to None, the file is saved as joint_angular_velocity.h5.
dropnan: boolean
Optional. If you want to drop any NaN values, this is useful for some downstream analysis (like PCA).
Outputs
-------
joint_vel: dataframe of joint angular velocity
Example
-------
>>> joint_vel = dlc2kinematics.compute_joint_velocity(joint_angle)
"""
try:
joint_angle = pd.read_hdf(joint_angle, "df_with_missing")
except:
pass
temp = savgol_filter(joint_angle, filter_window, order, axis=0, deriv=1)
angular_vel = pd.DataFrame(
temp, columns=joint_angle.columns, index=joint_angle.index
)
if not destfolder:
destfolder = os.getcwd()
if not output_filename:
output_filename = "joint_angular_velocity"
if dropnan:
print("Dropping the indices where joint angular velocity is nan")
angular_vel.dropna(inplace=True)
if save:
print("Saving the joint angular velocity as a pandas array in %s " % destfolder)
angular_vel.to_hdf(
os.path.join(destfolder, output_filename + ".h5"),
"df_with_missing",
format="table",
mode="w",
)
return angular_vel
def compute_joint_acceleration(
joint_angle,
filter_window=3,
order=2,
save=True,
destfolder=None,
output_filename=None,
dropnan=False,
):
"""
Computes the joint angular acceleration.
Parameters
----------
joint_angle: Pandas dataframe of joint angles. You can also pass the full path of joint angle filename as a string.
filter_window: int
Optional. The length of filter window which needs to be a positive odd integer.
order: int
Optional. Order of the polynomial to fit the data. The order must be less than the filter_window.
save: boolean
Optional. Saves the joint acceleration as a pandas dataframe if set to True.
destfolder: string
        Optional. Saves the joint acceleration in the specified destination folder. If it is set to None, the joint accelerations are saved in the current working directory.
output_filename: string
Optional. Name of the output file. If it is set to None, the file is saved as joint_angular_acceleration.h5
dropnan: boolean
Optional. If you want to drop any NaN values, this is useful for some downstream analysis (like PCA).
Outputs
-------
joint_acc: dataframe of joint angular acceleration.
Example
-------
>>> joint_acc = dlc2kinematics.compute_joint_acceleration(joint_angle)
"""
try:
        joint_angle = pd.read_hdf(joint_angle, "df_with_missing")  # api: pandas.read_hdf
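    # The lines below are a hedged sketch, not the original source: they complete
    # the try/except in the same way as compute_joint_velocity above and take the
    # second Savitzky-Golay derivative (deriv=2) to obtain the angular acceleration.
    # The original body may differ in details such as saving and NaN handling.
    except:
        pass
    temp = savgol_filter(joint_angle, filter_window, order, axis=0, deriv=2)
    angular_acc = pd.DataFrame(
        temp, columns=joint_angle.columns, index=joint_angle.index
    )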
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 12 15:29:47 2019
@author: aakas
"""
import pandas as pd
import time
from time import sleep
from selenium.webdriver.common.by import By
from selenium import webdriver
from pandas import ExcelWriter
from selenium.common.exceptions import NoSuchElementException
'''URL of website'''
url='https://www.bls.gov/ooh/'
#Lists to store scraped data
occupation_list=[]
job_summary_list=[]
salary_list=[]
ed_requirement_list=[]
work_ex_list=[]
num_jobs_list=[]
job_outlook_list=[]
employment_change_list=[]
similar_occupation_list=[]
occ_group_list= ['Architecture-and-Engineering','Arts-and-Design','Building-and-Grounds-Cleaning',
'Business-and-Financial','Community-and-Social-Service','Computer-and-Information-Technology',
'Construction-and-Extraction','Education-Training-and-Library','Entertainment-and-Sports',
                 'Farming-Fishing-and-Forestry','Food-Preparation-and-Serving','Healthcare',
                 'Installation-Maintenance-and-Repair','Legal','Life-Physical-and-Social-Science',
'Management','Math','Media-and-Communication','Military','Office-and-Administrative-Support',
'Personal-Care-and-Service','Production','Protective-Service','Sales','Transportation-and-Material-Moving']
# Start timer
Start_time= time.time()
# build each occupation-group URL and fetch it with the Selenium Chrome driver
driver=webdriver.Chrome()
for i in range(len(occ_group_list)):
occupation_gr=occ_group_list[i]
url_add=url+str(occupation_gr)+'/'
driver.get(url_add)
n=1
while True:
print("in while")
try:
print("in try")
'''job summary'''
job_summary_xpath='//*[@id="landing-page-table"]/tbody/tr['+str(n)+']/td[3]/p'
summary=driver.find_element_by_xpath(job_summary_xpath).text
job_summary_list.append(summary)
print(summary)
'''Occupation Name'''
element_xpath='//*[@id="landing-page-table"]/tbody/tr['+str(n)+']/td[2]'
occupation=driver.find_element_by_xpath(element_xpath).text
occupation_list.append(occupation)
print(occupation)
'''New page click'''
driver.find_element_by_xpath(element_xpath).click()
'''median Salary'''
element_xpath='//*[@id="quickfacts"]/tbody/tr[1]/td'
salary=driver.find_element_by_xpath(element_xpath).text
salary_list.append(salary)
'''Entry Level Education required'''
element_xpath='//*[@id="quickfacts"]/tbody/tr[2]/td'
required_edu=driver.find_element_by_xpath(element_xpath).text
ed_requirement_list.append(required_edu)
'''Work Ex'''
element_xpath='//*[@id="quickfacts"]/tbody/tr[3]/td'
work_ex=driver.find_element_by_xpath(element_xpath).text
work_ex_list.append(work_ex)
'''Job Outlook'''
element_xpath='//*[@id="quickfacts"]/tbody/tr[6]/td'
job_outlook=driver.find_element_by_xpath(element_xpath).text
job_outlook_list.append(job_outlook)
'''employment change'''
element_xpath='//*[@id="quickfacts"]/tbody/tr[7]/td'
emp_change=driver.find_element_by_xpath(element_xpath).text
employment_change_list.append(emp_change)
n=n+1
except NoSuchElementException:
            print("No more rows for group: " + occupation_gr)
break
'''Consolidation of all list into pandas dataframe'''
df1 = pd.DataFrame(occupation_list, columns=['Occupation'])
df2=pd.DataFrame(job_summary_list, columns=['Job Summary'])
df3=pd.DataFrame(salary_list, columns=['Median Salary'])  # api: pandas.DataFrame
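# Hedged sketch, not from the original script: the remaining lists would be wrapped
# the same way and written out with the ExcelWriter imported above; the output
# filename is an assumption.
df4 = pd.DataFrame(ed_requirement_list, columns=['Entry Level Education'])
df5 = pd.DataFrame(work_ex_list, columns=['Work Experience'])
df6 = pd.DataFrame(job_outlook_list, columns=['Job Outlook'])
df7 = pd.DataFrame(employment_change_list, columns=['Employment Change'])
result = pd.concat([df1, df2, df3, df4, df5, df6, df7], axis=1)
with ExcelWriter('bls_ooh_scrape.xlsx') as writer:
    result.to_excel(writer, index=False)
print('Total scraping time: %.1f seconds' % (time.time() - Start_time))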
import sys
import argparse
import numpy as np
import pandas as pd
from seekr import fasta
from seekr import graph
from seekr import pearson
from seekr.kmer_counts import BasicCounter, Log2
# TODO (Dan) fix names
from seekr.pwm import CountsWeighter
DOWNLOAD_GENCODE_DOC = """
Description
-----------
Download fasta files from https://www.gencodegenes.org/
The one parameter that must be passed is 'biotype'.
Its value must be one of:
* 'all' : Nucleotide sequences of all transcripts on the reference chromosomes
* 'pc' : Nucleotide sequences of coding transcripts on the reference chromosomes
* 'lncRNA' : Nucleotide sequences of long non-coding RNA transcripts on the reference chromosomes
Examples
--------
To download all human transcripts of the latest release into a fasta file:
$ seekr_download_gencode all
To do the same for mouse:
$ seekr_download_gencode all -s mouse
To get lncRNAs from the M5 release of mouse:
$ seekr_download_gencode lncRNA -s mouse -r M5
If you want to leave the fasta file gzipped:
$ seekr_download_gencode all -z
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
CANONICAL_GENCODE_DOC = """
Description
-----------
Filter GENCODE fasta file for only transcripts ending in 01.
This is based on the common names provided by GENCODE.
No strict guarantees are made about the relationship between genes and transcripts.
Examples
--------
To filter transcripts ending in 01, an input and output fasta file are required:
$ seekr_canonical_gencode rnas.fa rnas01.fa
If you want to specifically find transcripts with the ending 001:
$ seekr_canonical_gencode rnas.fa rnas01.fa -z 2
To enforce one isoform per ENSG id (specifically, the smallest 01 isoform):
$ seekr_canonical_gencode rnas.fa rnas01_1per.fa -u
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
KMER_COUNTS_DOC = """
Description
-----------
Generates a kmer count matrix of m rows by n columns,
where m is the number of transcripts in a fasta file and n is 4^kmer.
Examples
--------
The default settings take a .fa file and produce a labeled csv file:
$ seekr_kmer_counts rnas.fa -o out.csv
To get a compact and efficient .npy file, set the binary flag:
$ seekr_kmer_counts rnas.fa -o out.npy -b
You can also change the size of the kmer you're using, and prevent normalization:
$ seekr_kmer_counts rnas.fa -o out.csv -k 4 -uc -us -nl
If you ever do not want labels on a csv file:
$ seekr_kmer_counts rnas.fa -o out.csv -rl
Notes
-----
For more sophisticated options, you cannot use the command line, but need python instead.
To pass --log 1 argument for pre-zscore log-transform of k-mer counts, seekr_norm_vectors MUST be
run with the -cl flag. This log transforms the reference counts for appropriate mean and std calcs
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
PEARSON_DOC = """
Description
-----------
Generate a matrix of Pearson similarities from two kmer count files.
Examples
--------
The default settings accept two csv files and output a third csv file.
$ seekr_pearson kc_out.csv kc_out.csv -o out.csv
The only other options besides the `-o` flag control binary versus plain text input and output.
If you have a binary input file (i.e. a .npy file) and also want a binary output file, you can do:
$ seekr_pearson kc_out.npy kc_out.npy -o out.npy -bi -bo
Notes
-----
For more sophisticated options, you cannot use the command line, but need python instead.
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
VISUALIZE_DISTRO_DOC = """
Description
-----------
Generate an image showing the distribution of all Pearson r-values.
This can be useful for determining a threshold for the adjacency matrix.
Examples
--------
You must pass an adjacency matrix and an output path.
$ seekr_visualize_distro adj.csv adj.pdf
For large arrays, it's likely sufficient to visualize a portion of the adjacency matrix.
You can pass a float between 0 and 1 to the `-s` flag:
$ seekr_visualize_distro adj.csv adj.pdf -s .1
Notes
-----
For more sophisticated options, you cannot use the command line, but need python instead.
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
NORM_VECTORS_DOC = """
Description
-----------
Generate two .npy files from a .fa file to use as normalization vectors for other .fa files.
Examples
--------
The default setting accept a single fasta file.
$ seekr_norm_vectors gencode.fa
If you want to specify paths for the output files, or choose a different kmer size:
$ seekr_norm_vectors gencode.fa -k 5 -mv mean_5mers.npy -sv std_5mers.npy
If pre-zscore log transform is desired, you must pass the `--log2 pre` flag to log transform
the reference k-mer counts
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
GEN_RAND_RNAS_DOC = """
Description
-----------
Given a .fa file, create a second .fa file with random RNAs based on original RNAs.
Users control how similar the synthetic RNAs are to the originals.
Gross scale of kmer content can be controlled by setting kmer conservation size.
Fine scale control of similarity can be set by number of SNP mutations.
Examples
--------
The two required positional arguments are an input and output path to fasta files.
This will shuffle the nucleotides for each RNA:
$ seekr_gen_rand_rnas rnas.fa rnas_rand.fa
To conserve kmer content for k > 1, choose a different kmer size:
$ seekr_gen_rand_rnas rnas.fa rnas_rand.fa -k 2
Shuffling kmers is random. To reproduce output between runs, set a seed:
$ seekr_gen_rand_rnas rnas.fa rnas_rand.fa -s 0
It may be useful to conserve the kmer content of the whole fasta file.
Setting the `--group` flag loses conservation of individual sequences,
in preference for producing RNAs with a kmer frequency equal to background frequency.
Using `--group` still produces the same number of output RNAs as input.
Note: this may segfault on large fasta files.
$ seekr_gen_rand_rnas rnas.fa rnas_rand.fa -k 2 -g
In some cases, it is useful to have more fine-grained control over final kmer content.
Ex: when conserving large kmers, it may be impossible to shuffle shorter seqs.
Ex: if you want to produce a sequence with an exact Pearson's r-value to the original.
A number of random SNP mutations can be made in addition to shuffling.
Use the --mutation flag to set the approximate number of SNP mutations.
Note: Because the new nucleotide may be the same as the old, -m is approximate.
$ seekr_gen_rand_rnas rnas.fa rnas_rand.fa -m 100
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
GRAPH_DOC = """
Description
-----------
Find communities of transcripts from an adjacency matrix.
Examples
--------
Default setting accept a csv file, and a threshold value.
The csv file should be the product of seekr_pearson, or some other adjacency matrix.
The threshold is the value below which edges are removed from the graph.
seekr_pearson_distro can be run to suggest a value for the threshold.
A gml file containing the graph and communities will be produced.
$ seekr_graph adj.csv .13 -g graph.gml
For a cleaner csv file of just community information:
$ seekr_graph adj.csv .5 -g graph.gml -c communities.csv
To change the resolution parameter (gamma) for louvain/leidenalg:
$ seekr_graph adj.csv .1 -g graph.gml -r 1.5
To change the cap of the number of communities found, and set the seed:
$ seekr_graph adj.csv .1 -g graph.gml -n 10 -s 0
Numpy files are also valid input:
$ seekr_graph adj.npy .1 -g graph.gml
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
PWM_DOC = """
Description
-----------
Weight kmer profiles by protein binding PWMs to infer protein binding likelihood.
Examples
--------
A standard run of this tool needs three things:
1. A directory of PWM files
2. A counts file (produced from seekr_kmer_counts)
3. An output path
$ seekr_pwm path/to/pwms/ kc_out.csv -o pwm_weight_sums.csv
Numpy files can also be passed as input, but .csv files are the only output:
$ seekr_pwm path/to/pwms/ kc_out.npy -o pwm_weight_sums.csv
The kmer size can also be passed.
It should match the counts file.
Unlike most other seekr tools k=5 is the default for this tool.
$ seekr_pwm path/to/pwms/ kc_out.npy -k 6 -o pwm_weight_sums.csv
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
DOMAIN_PEARSON_DOC = """
Description
-----------
# Find domains of similarity between query transcripts and tiles of target transcripts.
Examples
--------
This tool requires several pieces of data:
1. A fasta file containing query sequences.
2. A second fasta file containing target sequences which will be tiled.
3. A mean vector for normalization (e.g. from `seekr_norm_vectors`).
4. A std vector for standardization (e.g. from `seekr_norm_vectors`).
For brevity in the documentation below,
we will assume that these required data have been stored in a variable:
$ REQUIRED="queries.fa targets.fa mean.npy std.npy"
To see the r-values, pass a location for storing them in a csv file.
$ seekr_domain_pearson $REQUIRED -r r_values.csv
Interpretation of r-value elements can be aided by viewing them as percentiles.
If you want percentiles, you must also pass a reference fasta path:
$ seekr_domain_pearson $REQUIRED -r r_values.csv -p percentiles.csv -rp reference.fa
Parameters you might pass to `seekr_kmer_counts` can also be passed.
If you change --kmer, ensure that your mean.npy and std.npy files match:
$ seekr_domain_pearson $REQUIRED -r r_values.csv -nl -k 5
You can also change the size of the domain,
and how far you slide along the target sequence before creating another domain:
$ seekr_domain_pearson $REQUIRED -r r_values.csv -w 1200 -s 150
Issues
------
Any issues can be reported to https://github.com/CalabreseLab/seekr/issues
---
"""
def _parse_args_or_exit(parser):
""""""
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
return parser.parse_args()
def _run_download_gencode(biotype, species, release, out_path, unzip):
# Note: This function is separated for testing purposes.
downloader = fasta.Downloader()
downloader.get_gencode(biotype, species, release, out_path, unzip)
def console_download_gencode():
assert sys.version_info[0] == 3, 'Python version must be 3.x'
    parser = argparse.ArgumentParser(usage=DOWNLOAD_GENCODE_DOC,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('biotype',
                        help=("Name of Gencode set to download. "
"Must be one of ('all', 'pc', 'lncRNA')."))
parser.add_argument('-s', '--species', default='human',
help=" Name of species. Must be one of: ('human' or 'mouse').")
parser.add_argument('-r', '--release', default=None,
help=("Name of specific release to download (e.g. 'M5'). "
"If None, download latest release."))
parser.add_argument('-o', '--out_path', default=None,
help="Path to location for fasta file. Default will save by release name.")
parser.add_argument('-z', '--zip', action='store_false',
help="Set if you do not want to gunzip fasta file after downloading.")
args = _parse_args_or_exit(parser)
_run_download_gencode(args.biotype, args.species, args.release, args.out_path, args.zip)
def _run_canonical_gencode(in_fasta, out_fasta, zeros, unique_per_gene):
# Note: This function is separated from console_kmer_counts for testing purposes.
maker = fasta.Maker(in_fasta, out_fasta)
maker.filter1(zeros, unique_per_gene)
def console_canonical_gencode():
assert sys.version_info[0] == 3, 'Python version must be 3.x'
parser = argparse.ArgumentParser(usage=CANONICAL_GENCODE_DOC,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('in_fasta', help='Full path of fasta file.')
parser.add_argument('out_fasta', help='Full path of filtered fasta file.')
parser.add_argument('-z', '--zeros', default=1,
help='Number of zeroes needed to be considered canonical.')
parser.add_argument('-u', '--unique_per_gene', action='store_true',
help='Set to enforce a limit of one isoform per ENSG id.')
args = _parse_args_or_exit(parser)
_run_canonical_gencode(args.in_fasta, args.out_fasta, args.zeros, args.unique_per_gene)
def _run_kmer_counts(fasta, outfile, kmer, binary, centered, standardized,
log2, remove_labels, mean_vector, std_vector, alphabet):
# Note: This function is separated from console_kmer_counts for testing purposes.
mean = mean_vector or centered
std = std_vector or standardized
label = not remove_labels
counter = BasicCounter(fasta, outfile, kmer, binary,
mean, std, Log2[log2], label=label, alphabet=alphabet)
counter.make_count_file()
def console_kmer_counts():
assert sys.version_info[0] == 3, 'Python version must be 3.x'
parser = argparse.ArgumentParser(usage=KMER_COUNTS_DOC,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('fasta', help='Full path of fasta file.')
parser.add_argument('-o', '--outfile', default='counts.seekr',
help='Name of file to save counts to.')
parser.add_argument('-k', '--kmer', default=6,
help='Length of kmers you want to count.')
parser.add_argument('-b', '--binary', action='store_true',
help='Set if output should be a .npy file.')
parser.add_argument('-uc', '--uncentered', action='store_false',
help='Set if output should not have the mean subtracted.')
parser.add_argument('-us', '--unstandardized', action='store_false',
help='Set if output should not be divided by the standard deviation.')
parser.add_argument('-l', '--log2', default=Log2.post.name,
choices=[l2.name for l2 in Log2],
                        help='Decide if and when to log transform counts')
parser.add_argument('-rl', '--remove_labels', action='store_true',
help='Set to save without index and column labels.')
parser.add_argument('-mv', '--mean_vector', default=None,
help='Optional path to mean vector numpy file.')
parser.add_argument('-sv', '--std_vector', default=None,
help='Optional path to std vector numpy file.')
parser.add_argument('-a', '--alphabet', default='AGTC',
help='Valid letters to include in kmer.')
args = _parse_args_or_exit(parser)
_run_kmer_counts(args.fasta, args.outfile, int(args.kmer), args.binary, args.uncentered,
args.unstandardized, args.log2, args.remove_labels, args.mean_vector,
args.std_vector, args.alphabet)
def _run_pearson(counts1, counts2, outfile, binary_input, binary_output):
# Note: This function is separated for testing purposes.
names1 = None
names2 = None
if binary_input:
counts1 = np.load(counts1)
counts2 = np.load(counts2)
else:
counts1 = pd.read_csv(counts1, index_col=0)
counts2 = pd.read_csv(counts2, index_col=0)
names1 = counts1.index.values
names2 = counts2.index.values
if binary_output:
pearson.pearson(counts1, counts2, outfile=outfile)
else:
dist = pearson.pearson(counts1, counts2)
        dist = pd.DataFrame(dist, names1, names2)  # api: pandas.DataFrame
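        # Hedged sketch of the presumable final step (the exact call is an
        # assumption): write the labeled similarity matrix to a plain-text csv.
        dist.to_csv(outfile)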
# General data utils functions
import pandas as pd
import os
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,MinMaxScaler
from keras.utils import to_categorical
import numpy as np
def rebalance(frame: pd.DataFrame, col: str):
"""
Rebalance a DataFrame with imbalanced records
:param frame: DataFrame.
:param col: The target column
:return: Balanced DataFrame
"""
max_size = frame[col].value_counts().max()
lst = [frame]
for _, group in frame.groupby(col):
lst.append(group.sample(int(max_size - len(group)), replace=True))
    frame_new = pd.concat(lst)  # api: pandas.concat
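    # Hedged continuation: the docstring promises a balanced DataFrame, so the
    # function presumably ends by returning it.
    return frame_new

# Hedged usage sketch (illustrative column names, not part of the original module):
# the minority class is oversampled until both classes have the same count.
if __name__ == "__main__":
    toy = pd.DataFrame({"feature": range(6), "label": [0, 0, 0, 0, 1, 1]})
    balanced = rebalance(toy, "label")
    print(balanced["label"].value_counts())  # both classes end up with 4 rows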
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# Invoke Libraries
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
import pandas as pd
from indepth.functions import remov_punct, symSentSim, wrdCnt, sbjVrbAgreement, modalRuleError, PrpDonot, VrbTenseAgreementError, a_an_error, realWrds, symSentSim, motionVerbs, coherentWrds, hyponymPolysem_cnt, concretMeaningPOS, vocabSize, commonWrd, buildFeatures
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# Main function
#&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
# for a pair of document and query, the top sentences with high symmetric sentence similarity are returned.
def MostSimilarSent(d, q, num_sents):
lcase = d.lower()
sentences = lcase.split('.')
q = remov_punct(q)
SSSlist = [(s, symSentSim(s,q)) for s in sentences if s]
    df = pd.DataFrame(SSSlist, columns=['sentence','SSScore'])  # api: pandas.DataFrame
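    # Hedged sketch of the remaining step implied by the comment above the function:
    # keep the num_sents sentences with the highest symmetric similarity score.
    # The exact return value of the original function is an assumption.
    top_sents = df.sort_values('SSScore', ascending=False).head(num_sents)
    return top_sents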
import numpy as np
import pandas as pd
import os
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import adjusted_rand_score
from sklearn.preprocessing import LabelEncoder
import operator
from scipy.stats import wilcoxon
from utils.constants import UNIVARIATE_DATASET_NAMES as DATASET_NAMES
from utils.constants import UNIVARIATE_ARCHIVE_NAMES as ARCHIVE_NAMES
from utils.constants import MAX_PROTOTYPES_PER_CLASS
def zNormalize(x):
x_mean = x.mean(axis=0) # mean for each dimension of time series x
x_std = x.std(axis=0) # std for each dimension of time series x
x = (x - x_mean) / (x_std)
return x
def readucr(filename):
data = np.loadtxt(filename, delimiter=',')
Y = data[:, 0]
X = data[:, 1:]
return X, Y
def check_if_file_exits(file_name):
return os.path.exists(file_name)
def create_directory(directory_path):
if os.path.exists(directory_path):
return directory_path
else:
try:
os.makedirs(directory_path)
except:
# in case another machine created the path meanwhile
return None
return directory_path
def transform_labels(y_train, y_test):
"""
Transform label to min equal zero and continuous
For example if we have [1,3,4] ---> [0,1,2]
"""
# init the encoder
encoder = LabelEncoder()
# concat train and test to fit
y_train_test = np.concatenate((y_train, y_test), axis=0)
# fit the encoder
encoder.fit(y_train_test)
# transform to min zero and continuous labels
new_y_train_test = encoder.transform(y_train_test)
# resplit the train and test
new_y_train = new_y_train_test[0:len(y_train)]
new_y_test = new_y_train_test[len(y_train):]
return new_y_train, new_y_test
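# Hedged usage sketch, not part of the original module: demonstrates the mapping
# described in the docstring above, where labels [1, 3, 4] become [0, 1, 2].
def _demo_transform_labels():
    y_tr, y_te = transform_labels(np.array([1, 3, 4]), np.array([4, 1, 3]))
    assert list(y_tr) == [0, 1, 2]
    assert list(y_te) == [2, 0, 1]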
def split_df(path):
    # TSV file, so split on the tab character '\t'
data = pd.read_csv(path, header=None, sep='\t').values
Y = data[:, 0]
X = data[:, 1:]
return X, Y
# Use the first column of the df as the label and the remaining columns as the data
def read_all_datasets(root_dir, archive_name, sort_dataset_name=False):
datasets_dict = {}
dataset_names_to_sort = []
for dataset_name in DATASET_NAMES:
file_name = root_dir + archive_name + '/' + dataset_name + '/' + dataset_name
x_train, y_train = split_df(file_name + '_TRAIN.tsv')
x_test, y_test = split_df(file_name + '_TEST.tsv')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(), y_test.copy())
        # The resulting x_train and y_train are numpy arrays
# x_train, y_train = readucr(file_name + '_TRAIN')
# x_test, y_test = readucr(file_name + '_TEST')
# datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(), y_test.copy())
# dataset_names_to_sort.append((dataset_name, len(x_train)))
#
# item_getter = 1
# if sort_dataset_name == True:
# item_getter = 0
# dataset_names_to_sort.sort(key=operator.itemgetter(item_getter))
#
# for i in range(len(DATASET_NAMES)):
# DATASET_NAMES[i] = dataset_names_to_sort[i][0]
return datasets_dict
def calculate_metrics(y_true, y_pred, duration, clustering=False):
"""
Return a data frame that contains the precision, accuracy, recall and the duration
    For clustering it applies the adjusted rand index
"""
if clustering == False:
res = pd.DataFrame(data=np.zeros((1, 5), dtype=np.float), index=[0],
columns=['precision', 'accuracy', 'error', 'recall', 'duration'])
res['precision'] = precision_score(y_true, y_pred, average='macro')
res['accuracy'] = accuracy_score(y_true, y_pred)
res['recall'] = recall_score(y_true, y_pred, average='macro')
res['duration'] = duration
res['error'] = 1 - res['accuracy']
return res
else:
res = pd.DataFrame(data=np.zeros((1, 2), dtype=np.float), index=[0],
columns=['ari', 'duration'])
res['duration'] = duration
res['ari'] = adjusted_rand_score(y_pred, y_true)
return res
def dataset_is_ready_to_plot(df_res, dataset_name, archive_name, array_algorithm_names):
for algorithm_name in array_algorithm_names:
        # if any algorithm is not finished, do not plot
if not any(df_res.loc[(df_res['dataset_name'] == dataset_name) \
& (df_res['archive_name'] == archive_name)] \
['algorithm_name'] == algorithm_name) \
or (df_res.loc[(df_res['dataset_name'] == dataset_name) \
& (df_res['archive_name'] == archive_name) \
& (df_res['algorithm_name'] == algorithm_name)] \
['nb_prototypes'].max() != MAX_PROTOTYPES_PER_CLASS):
return False
return True
def init_empty_df_metrics():
return pd.DataFrame(data=np.zeros((0, 5), dtype=np.float), index=[],
columns=['precision', 'accuracy', 'error', 'recall', 'duration'])
def get_df_metrics_from_avg(avg_df_metrics):
res = pd.DataFrame(data=np.zeros((1, 5), dtype=np.float), index=[0],
columns=['precision', 'accuracy', 'error', 'recall', 'duration'])
res['accuracy'] = avg_df_metrics['accuracy'].mean()
res['precision'] = avg_df_metrics['precision'].mean()
res['error'] = avg_df_metrics['error'].mean()
res['recall'] = avg_df_metrics['recall'].mean()
res['duration'] = avg_df_metrics['duration'].mean()
return res
def get_df_metrics_from_avg_data_cluster(avg_df_metrics):
res = pd.DataFrame(data=np.zeros((1, 2), dtype=np.float), index=[0],
columns=['ari', 'duration'])
res['ari'] = avg_df_metrics['ari'].mean()
res['duration'] = avg_df_metrics['duration'].mean()
return res
def read_dataset(root_dir, archive_name, dataset_name):
datasets_dict = {}
file_name = root_dir + '/' + archive_name + '/' + dataset_name + '/' + dataset_name
x_train, y_train = readucr(file_name + '_TRAIN')
x_test, y_test = readucr(file_name + '_TEST')
datasets_dict[dataset_name] = (x_train.copy(), y_train.copy(), x_test.copy(),
y_test.copy())
return datasets_dict
def plot_epochs_metric(hist, file_name, metric='loss'):
plt.figure()
plt.plot(hist.history[metric])
plt.plot(hist.history['val_' + metric])
plt.title('model ' + metric)
plt.ylabel(metric)
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.savefig(file_name)
plt.close()
def save_logs(output_directory, hist, y_pred, y_true, duration):
    hist_df = pd.DataFrame(hist.history)  # api: pandas.DataFrame
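    # Hedged sketch of how save_logs presumably continues, based on the helpers
    # defined above (calculate_metrics); the exact file names are assumptions.
    hist_df.to_csv(output_directory + 'history.csv', index=False)
    df_metrics = calculate_metrics(y_true, y_pred, duration)
    df_metrics.to_csv(output_directory + 'df_metrics.csv', index=False)
    return df_metrics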
"""
Contains the ligand similarity search class.
"""
from pathlib import Path
from typing_extensions import ParamSpecKwargs
import pandas as pd # for creating dataframes and handling data
from .consts import Consts
from .ligand import Ligand
from .helpers import pubchem, rdkit
class LigandSimilaritySearch:
"""
Automated ligand similarity-search process of the pipeline.
    Takes an input Ligand object, a Specs.LigandSimilaritySearch object,
    and the corresponding output path, and automatically runs all the necessary
    processes to output a set of analogs with the highest drug-likeness scores.
Attributes
----------
TODO
all_analogs
"""
def __init__(
self,
ligand_obj,
similarity_search_specs_obj,
similarity_search_output_path,
frozen_data_filepath=None,
):
"""
Initialize the ligand similarity search.
Parameters
----------
ligand_obj : utils.Ligand
The Ligand object of the project.
similarity_search_specs_obj : utils.Specs.LigandSimilaritySearch
The similarity search specification data-class of the project.
similarity_search_output_path : str or pathlib.Path
Output path of the project's similarity search information.
frozen_data_filepath : str or pathlib.Path
If existing data is to be used, provide the path to a csv file
containing the columns "CID" and "CanonicalSMILES" for the analogs.
"""
similarity_search_output_path = Path(similarity_search_output_path)
        if frozen_data_filepath is not None:
all_analog_identifiers_df = pd.read_csv(frozen_data_filepath)
elif (
similarity_search_specs_obj.search_engine
is Consts.LigandSimilaritySearch.SearchEngines.PUBCHEM
):
analogs_info = pubchem.similarity_search(
ligand_obj.smiles,
similarity_search_specs_obj.min_similarity_percent,
similarity_search_specs_obj.max_num_results,
)
all_analog_identifiers_df = pd.DataFrame(analogs_info)
else:
raise ValueError(f"Search engine unknown: {similarity_search_specs_obj.search_engine}")
# create dataframe from initial results
all_analog_identifiers_df["Mol"] = all_analog_identifiers_df["CanonicalSMILES"].apply(
lambda smiles: rdkit.create_molecule_object("smiles", smiles)
)
all_analog_identifiers_df["dice_similarity"] = all_analog_identifiers_df["Mol"].apply(
lambda mol: rdkit.calculate_similarity_dice(ligand_obj.rdkit_obj, mol)
)
all_analog_properties_df = pd.DataFrame(
(
all_analog_identifiers_df["Mol"].apply(
lambda mol: rdkit.calculate_druglikeness(mol)
)
).tolist()
)
        all_analogs_df = pd.concat([all_analog_identifiers_df, all_analog_properties_df], axis=1)  # api: pandas.concat
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 19 15:51:48 2021
@author: anamaria
"""
import gym
import highway_env
from stable_baselines3 import DQN
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
env = gym.make("roundabout-v0")
model = DQN('MlpPolicy', env,
policy_kwargs=dict(net_arch=[256, 256]),
learning_rate=5e-4,
buffer_size=15000,
learning_starts=200,
batch_size=32,
gamma=0.8,
train_freq=1,
gradient_steps=1,
target_update_interval=50,
verbose=1,
tensorboard_log="highway_dqn/")
model.learn(int(2e4))
model.save("highway_dqn/model")
actions = []
vel = []
target_vel=[]
acc=[]
pos=[]
lat_vel=[]
action_names = ['LEFT LANE','IDLE','RIGHT LANE','FASTER','SLOWER']
df = pd.DataFrame(columns=['vehicle', 'x', 'y', 'vx', 'vy'])  # api: pandas.DataFrame
# Script to extract image date from filename
# and update the meta.csv file with system:time_start property
import pandas as pd
import re
filename = 'meta.csv'
df = pd.read_csv(filename)  # api: pandas.read_csv
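# Hedged sketch, not from the original script: the filename column ('id_no') and
# the date pattern below are assumptions; only the overall intent -- parse a date
# out of each image filename and store epoch milliseconds as 'system:time_start' --
# comes from the comment at the top of the script.
def extract_time_start(name):
    match = re.search(r'(\d{4})[-_]?(\d{2})[-_]?(\d{2})', str(name))
    if not match:
        return None
    ts = pd.Timestamp('{}-{}-{}'.format(*match.groups()))
    return int(ts.value // 10**6)  # nanoseconds since epoch -> milliseconds

df['system:time_start'] = df['id_no'].apply(extract_time_start)
df.to_csv(filename, index=False)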
# -*- coding: utf-8 -*-
import nose
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class TestIndexCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def test_setitem_index_numeric_coercion_int(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.index.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[5] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 5]))
self.assertEqual(temp.index.dtype, np.int64)
# int + float -> float
temp = s.copy()
temp[1.1] = 5
tm.assert_series_equal(temp, pd.Series([1, 2, 3, 4, 5],
index=[0, 1, 2, 3, 1.1]))
self.assertEqual(temp.index.dtype, np.float64)
def test_setitem_index_numeric_coercion_float(self):
# tests setitem with non-existing numeric key
s = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(s.index.dtype, np.float64)
# float + int -> int
temp = s.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
temp = s.copy()
temp[5.1] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=[1.1, 2.1, 3.1, 4.1, 5.1])
tm.assert_series_equal(temp, exp)
self.assertEqual(temp.index.dtype, np.float64)
def test_insert_numeric_coercion_int(self):
idx = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.int64)
# int + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1, 1, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
# int + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1, 1.1, 2, 3, 4]))
self.assertEqual(res.dtype, np.float64)
# int + bool -> int
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1, 0, 2, 3, 4]))
self.assertEqual(res.dtype, np.int64)
def test_insert_numeric_coercion_float(self):
idx = pd.Float64Index([1, 2, 3, 4])
self.assertEqual(idx.dtype, np.float64)
# float + int -> int
res = idx.insert(1, 1)
tm.assert_index_equal(res, pd.Index([1., 1., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + float -> float
res = idx.insert(1, 1.1)
tm.assert_index_equal(res, pd.Index([1., 1.1, 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
# float + bool -> float
res = idx.insert(1, False)
tm.assert_index_equal(res, pd.Index([1., 0., 2., 3., 4.]))
self.assertEqual(res.dtype, np.float64)
class TestSeriesCoercion(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.rep = {}
self.rep['object'] = ['a', 'b']
self.rep['int64'] = [4, 5]
self.rep['float64'] = [1.1, 2.2]
self.rep['complex128'] = [1 + 1j, 2 + 2j]
self.rep['bool'] = [True, False]
def test_setitem_numeric_coercion_int(self):
s = pd.Series([1, 2, 3, 4])
self.assertEqual(s.dtype, np.int64)
# int + int -> int
temp = s.copy()
temp[1] = 1
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
temp = s.copy()
temp[1] = 1.1
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
# int + complex -> complex
temp = s.copy()
temp[1] = 1 + 1j
tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 3, 4]))
self.assertEqual(temp.dtype, np.complex128)
# int + bool -> int
temp = s.copy()
temp[1] = True
tm.assert_series_equal(temp, pd.Series([1, 1, 3, 4]))
self.assertEqual(temp.dtype, np.int64)
def test_setitem_numeric_coercion_float(self):
s = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(s.dtype, np.float64)
# float + int -> float
temp = s.copy()
temp[1] = 1
        tm.assert_series_equal(temp, pd.Series([1.1, 1.0, 3.3, 4.4]))  # api: pandas.Series
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
from pandas import (
DataFrame,
DatetimeIndex,
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.window import ExponentialMovingWindow
def test_doc_string():
df = DataFrame({"B": [0, 1, 2, np.nan, 4]})
df
df.ewm(com=0.5).mean()
def test_constructor(frame_or_series):
c = frame_or_series(range(5)).ewm
# valid
c(com=0.5)
c(span=1.5)
c(alpha=0.5)
c(halflife=0.75)
c(com=0.5, span=None)
c(alpha=0.5, com=None)
c(halflife=0.75, alpha=None)
# not valid: mutually exclusive
msg = "comass, span, halflife, and alpha are mutually exclusive"
with pytest.raises(ValueError, match=msg):
c(com=0.5, alpha=0.5)
with pytest.raises(ValueError, match=msg):
c(span=1.5, halflife=0.75)
with pytest.raises(ValueError, match=msg):
c(alpha=0.5, span=1.5)
# not valid: com < 0
msg = "comass must satisfy: comass >= 0"
with pytest.raises(ValueError, match=msg):
c(com=-0.5)
# not valid: span < 1
msg = "span must satisfy: span >= 1"
with pytest.raises(ValueError, match=msg):
c(span=0.5)
# not valid: halflife <= 0
msg = "halflife must satisfy: halflife > 0"
with pytest.raises(ValueError, match=msg):
c(halflife=0)
# not valid: alpha <= 0 or alpha > 1
msg = "alpha must satisfy: 0 < alpha <= 1"
for alpha in (-0.5, 1.5):
with pytest.raises(ValueError, match=msg):
c(alpha=alpha)
@pytest.mark.parametrize("method", ["std", "mean", "var"])
def test_numpy_compat(method):
# see gh-12811
e = ExponentialMovingWindow(Series([2, 4, 6]), alpha=0.5)
msg = "numpy operations are not valid with window objects"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(e, method)(dtype=np.float64)
def test_ewma_times_not_datetime_type():
msg = r"times must be datetime64\[ns\] dtype."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(5))
def test_ewma_times_not_same_length():
msg = "times must be the same length as the object."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))
def test_ewma_halflife_not_correct_type():
msg = "halflife must be a timedelta convertible object"
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))
def test_ewma_halflife_without_times(halflife_with_times):
msg = "halflife can only be a timedelta convertible argument if times is not None."
with pytest.raises(ValueError, match=msg):
Series(range(5)).ewm(halflife=halflife_with_times)
@pytest.mark.parametrize(
"times",
[
np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),
date_range("2000", freq="D", periods=10),
date_range("2000", freq="D", periods=10).tz_localize("UTC"),
],
)
@pytest.mark.parametrize("min_periods", [0, 2])
def test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="nuisance columns"):
# GH#42738
result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()
expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()
tm.assert_frame_equal(result, expected)
def test_ewma_with_times_variable_spacing(tz_aware_fixture):
tz = tz_aware_fixture
halflife = "23 days"
times = DatetimeIndex(
["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"]
).tz_localize(tz)
data = np.arange(3)
df = DataFrame(data)
result = df.ewm(halflife=halflife, times=times).mean()
expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])
tm.assert_frame_equal(result, expected)
def test_ewm_with_nat_raises(halflife_with_times):
# GH#38535
ser = Series(range(1))
times = DatetimeIndex(["NaT"])
with pytest.raises(ValueError, match="Cannot convert NaT values to integer"):
ser.ewm(com=0.1, halflife=halflife_with_times, times=times)
def test_ewm_with_times_getitem(halflife_with_times):
# GH 40164
halflife = halflife_with_times
data = np.arange(10.0)
data[::2] = np.nan
times = date_range("2000", freq="D", periods=10)
df = DataFrame({"A": data, "B": data})
result = df.ewm(halflife=halflife, times=times)["A"].mean()
expected = df.ewm(halflife=1.0)["A"].mean()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"])
def test_ewm_getitem_attributes_retained(arg, adjust, ignore_na):
# GH 40164
kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na}
ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs)
expected = {attr: getattr(ewm, attr) for attr in ewm._attributes}
ewm_slice = ewm["A"]
result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes}
assert result == expected
def test_ewm_vol_deprecated():
ser = Series(range(1))
with tm.assert_produces_warning(FutureWarning):
result = ser.ewm(com=0.1).vol()
expected = ser.ewm(com=0.1).std()
tm.assert_series_equal(result, expected)
def test_ewma_times_adjust_false_raises():
# GH 40098
with pytest.raises(
NotImplementedError, match="times is not supported with adjust=False."
):
Series(range(1)).ewm(
0.1, adjust=False, times=date_range("2000", freq="D", periods=1)
)
@pytest.mark.parametrize(
"func, expected",
[
[
"mean",
DataFrame(
{
0: range(5),
1: range(4, 9),
2: [7.428571, 9, 10.571429, 12.142857, 13.714286],
},
dtype=float,
),
],
[
"std",
DataFrame(
{
0: [np.nan] * 5,
1: [4.242641] * 5,
2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788],
}
),
],
[
"var",
DataFrame(
{
0: [np.nan] * 5,
1: [18.0] * 5,
2: [21.428571, 27, 33.428571, 40.714286, 48.857143],
}
),
],
],
)
def test_float_dtype_ewma(func, expected, float_numpy_dtype):
# GH#42452
df = DataFrame(
{0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype
)
e = df.ewm(alpha=0.5, axis=1)
result = getattr(e, func)()
tm.assert_frame_equal(result, expected)
def test_times_string_col_deprecated():
# GH 43265
data = np.arange(10.0)
data[::2] = np.nan
df = DataFrame({"A": data, "time_col": date_range("2000", freq="D", periods=10)})
with tm.assert_produces_warning(FutureWarning, match="Specifying times"):
result = df.ewm(halflife="1 day", min_periods=0, times="time_col").mean()
expected = df.ewm(halflife=1.0, min_periods=0).mean()
tm.assert_frame_equal(result, expected)
def test_ewm_sum_adjust_false_notimplemented():
data = Series(range(1)).ewm(com=1, adjust=False)
with pytest.raises(NotImplementedError, match="sum is not"):
data.sum()
@pytest.mark.parametrize(
"expected_data, ignore",
[[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]],
)
def test_ewm_sum(expected_data, ignore):
# xref from Numbagg tests
# https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50
data = Series([10, 0, np.nan, 10])
result = data.ewm(alpha=0.5, ignore_na=ignore).sum()
expected = Series(expected_data)
tm.assert_series_equal(result, expected)
def test_ewma_adjust():
vals = Series(np.zeros(1000))
vals[5] = 1
result = vals.ewm(span=100, adjust=False).mean().sum()
assert np.abs(result - 1) < 1e-2
def test_ewma_cases(adjust, ignore_na):
# try adjust/ignore_na args matrix
s = Series([1.0, 2.0, 4.0, 8.0])
if adjust:
expected = Series([1.0, 1.6, 2.736842, 4.923077])
else:
expected = Series([1.0, 1.333333, 2.222222, 4.148148])
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
def test_ewma_nan_handling():
s = Series([1.0] + [np.nan] * 5 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([1.0] * len(s)))
s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])
result = s.ewm(com=5).mean()
tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))
@pytest.mark.parametrize(
"s, adjust, ignore_na, w",
[
(
Series([np.nan, 1.0, 101.0]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],
),
(
Series([np.nan, 1.0, 101.0]),
False,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, 101.0]),
False,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
True,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
True,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],
),
(
Series([1.0, np.nan, 101.0]),
False,
False,
[(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([1.0, np.nan, 101.0]),
False,
True,
[(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
False,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
True,
True,
[np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
False,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),
False,
True,
[
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
np.nan,
np.nan,
(1.0 / (1.0 + 2.0)),
np.nan,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
True,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))),
1.0,
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
False,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 3,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0))
* ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),
],
),
(
Series([1.0, np.nan, 101.0, 50.0]),
False,
True,
[
(1.0 - (1.0 / (1.0 + 2.0))) ** 2,
np.nan,
(1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),
(1.0 / (1.0 + 2.0)),
],
),
],
)
def test_ewma_nan_handling_cases(s, adjust, ignore_na, w):
# GH 7603
expected = (s.multiply(w).cumsum() / Series(w).cumsum()).fillna(method="ffill")
result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()
tm.assert_series_equal(result, expected)
if ignore_na is False:
# check that ignore_na defaults to False
result = s.ewm(com=2.0, adjust=adjust).mean()
tm.assert_series_equal(result, expected)
def test_ewm_alpha():
# GH 10789
arr = np.random.randn(100)
locs = np.arange(20, 40)
arr[locs] = np.NaN
s = Series(arr)
a = s.ewm(alpha=0.61722699889169674).mean()
b = s.ewm(com=0.62014947789973052).mean()
c = s.ewm(span=2.240298955799461).mean()
d = s.ewm(halflife=0.721792864318).mean()
tm.assert_series_equal(a, b)
    tm.assert_series_equal(a, c)  # api: pandas._testing.assert_series_equal
from itertools import islice
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from model_repair.cache42.cached_42 import cache_42
import tools.pudb_hook
from tools.utils import write_json, write_json_np_ok, read_json
from model_repair.override_for_release import get_interface
# @cache_42(ignore_args=["gcfg", "verbose"], force_recompute=True)
@cache_42(ignore_args=["gcfg", "verbose"])
def finetune_on_cluster_ewc(cfg, split_out, cluster_id, verbose, gcfg, cache):
if verbose:
print(f"\n\n\n ******************************************** Starting finetuning on {cluster_id} ******************************************** \n")
# Load dataframe
path_df = split_out.get_path() / "clustered_and_splitted.feather"
objs_info_old = pd.read_feather(str(path_df))
# Load interface to get split algorithm adapted to the task
interface = get_interface(gcfg)
# Get the model
if gcfg["trigger_extract"].epochs is None or len(gcfg["trigger_extract"].epochs) > 1:
# If 1 epoch, it can be the ref epoch..
epoch = int(gcfg["trigger_extract"]["ref_epoch"])
elif len(gcfg["trigger_extract"].epochs) == 1:
epoch = gcfg["trigger_extract"].epochs[0]
checkpoint_path = interface.get_checkpoint_path(epoch)
model = interface.load_model(checkpoint_path)
# Create output path
out_path = cache.get_path()
out_path.mkdir(parents=True, exist_ok=True)
filename_best_val_model = str(out_path / "model_best_val.pytorch")
assert cluster_id is not None # NOTE: I used to use None to mean all clusters but that's just confusing now
# EWC set-up --------------------
fisher_dict = {}
optpar_dict = {}
if "alternative_ewc" in cfg:
# pass
if cfg["alternative_ewc"] == "continual_ai":
print("EWC continual_ai")
get_fisher_and_opt_params_continual_ai(cfg, fisher_dict, optpar_dict, objs_info_old, interface, model, cluster_id="correct")
elif cfg["alternative_ewc"] == "moskomule":
print("EWC moskomule")
get_fisher_and_opt_params_github_moskomule(cfg, fisher_dict, optpar_dict, objs_info_old, interface, model, cluster_id="correct")
else:
raise NotImplementedError()
else:
get_fisher_and_opt_params(cfg, fisher_dict, optpar_dict, objs_info_old, interface, model, cluster_id="correct")
previous_tasks = ["correct"]
if cluster_id == "all":
# Select all objects which are non -1 (non-mistakes)
objs_info_old_f = objs_info_old[objs_info_old["cluster"]!=-1]
train_info = objs_info_old_f[(objs_info_old_f["split_id"]==0)]
val_info = objs_info_old_f[(objs_info_old_f["split_id"]==1)]
n_clusters = objs_info_old["cluster"].unique().max() + 1
max_iter_mult_factor = n_clusters # If 5 clusters, training on all should do 5x the iters of training on one cluster
elif cluster_id == "nothing":
# Save current model and return (no finetuning)
torch.save(model.state_dict(), filename_best_val_model)
return cache
elif cluster_id == "other_epoch":
# Get the model from another epoch
checkpoint_path = interface.get_checkpoint_path(cfg["other_epoch"])
model = interface.load_model(checkpoint_path)
torch.save(model.state_dict(), filename_best_val_model)
return cache
elif isinstance(cluster_id, int):
objs_info_old_f = objs_info_old[objs_info_old["cluster"]==cluster_id]
train_info = objs_info_old_f[(objs_info_old_f["split_id"]==0)]
val_info = objs_info_old_f[(objs_info_old_f["split_id"]==1)]
max_iter_mult_factor = 1.0 # Training on a single cluster keeps the base number of iterations per epoch
else:
raise NotImplementedError()
# Get the dataset
example_key = interface.get_example_key()
specific_frames_cluster_train = train_info[example_key].unique().tolist()
specific_frames_cluster_val = val_info[example_key].unique().tolist()
if "prevent_tp_fpfn_overlap" in cfg and cfg["prevent_tp_fpfn_overlap"]:
print("prevent_tp_fpfn_overlap mode !")
specific_frames_correct_train, specific_frames_correct_val, specific_frames_correct_test = get_list_of_correct_frames(objs_info_old, example_key)
all_correct_frames = specific_frames_correct_train + specific_frames_correct_val + specific_frames_correct_test
specific_frames_cluster_train = list(set(specific_frames_cluster_train) - set(all_correct_frames))
specific_frames_cluster_val = list(set(specific_frames_cluster_val) - set(all_correct_frames))
dataloader_train = interface.get_dataloader(specific_frames=specific_frames_cluster_train)
dataloader_val = interface.get_dataloader(specific_frames=specific_frames_cluster_val)
optimizer = torch.optim.Adam(model.parameters(), lr=cfg["learning_rate"], weight_decay=cfg["weight_decay"])
best_val = -1
epoch_last_inc_val = 0
# NOTE: Start by saving the non-finetuned model so correct-cluster early stopping can stop even at the first epoch
torch.save(model.state_dict(), filename_best_val_model)
if "skip_finetuning" in cfg and cfg["skip_finetuning"]:
return cache
if "how_many_steps_to" in cfg:
# cfg["how_many_steps_to"]
how_many_steps_to = {k: np.inf for k in cfg["how_many_steps_to"]}
filename_how_many_steps_to = str(out_path / "how_many_steps_to.json")
stats_finetuning = {"train": {}, "val": {}}
filename_stats_finetuning = str(out_path / "stats_finetuning.json")
if "override_val_metric" in cfg:
print("override_val_metric")
val_metric = cfg["override_val_metric"]
else:
val_metric = interface.validation_metric()
if "train_grad_pos" not in cfg:
train_grad_pos = True
else:
train_grad_pos = cfg["train_grad_pos"]
if not train_grad_pos:
print("WARNING: no train_grad_pos_mode")
for epoch in range(cfg["max_epochs_train"]):
if verbose:
print(f"------------------------------------------ Epoch: {epoch} ------------------------------------------------------")
# Train : -----------------------------------------------------------------------------------------------------------------
torch.set_grad_enabled(True)
model.train()
keep_penalties = []
all_objs = []
sliced_dataloader_train = islice(dataloader_train, int(max_iter_mult_factor*cfg["max_steps_per_epoch"]))
for i_batch, sample in tqdm(enumerate(sliced_dataloader_train), total=len(dataloader_train)):
optimizer.zero_grad()
# Get the list of objects on this frame that belong to the same cluster and train split
example_key = interface.get_example_key()
this_cluster_objs = train_info[train_info[example_key]==sample[example_key][0] ]
first_loss_save = None
for idx, train_this in this_cluster_objs.iterrows():
if interface.requires_grad_pos() and train_grad_pos:
grad_pos = (train_this["r"], train_this["t"])
else:
grad_pos = None
loss, preds = interface.forward_loss(sample, grad_pos=grad_pos, output_preds=True)
# EWC penalty --------------------------------------------------------------
penalty = 0.0
for previous_cluster_id in previous_tasks:
for name, param in model.named_parameters():
fisher = fisher_dict[previous_cluster_id][name]
optpar = optpar_dict[previous_cluster_id][name]
if "ewc_prior" in cfg:
penal_fish = (fisher * (optpar - param).pow(2)).sum()
penal_prior = (cfg["ewc_prior"] * (optpar - param).pow(2)).sum()
penalty += (penal_fish + penal_prior) * cfg["ewc_lambda"] * 0.5
else:
penalty += (fisher * (optpar - param).pow(2)).sum() * cfg["ewc_lambda"]
keep_penalties.append(penalty.item())
loss = penalty + loss
loss.backward()
if first_loss_save is None:
first_loss_save = loss.cpu().detach().numpy()
if not train_grad_pos: # Only one iter
assert grad_pos is None
break
optimizer.step()
# Get the (new) objects for computing the metrics
objs = interface.get_objects(preds, sample) # List of dicts
objs = pd.DataFrame(objs)
# Identify the cluster for each object (-2 means new object, -1 correct cluster)
objs = interface.identify_cluster(objs, objs_info_old, frame_name=sample[example_key][0])
# Filter objects to only keep objects belonging to the current cluster
if cluster_id == "all":
objs = objs[objs["cluster"] >= 0]
else:
objs = objs[objs["cluster"] == cluster_id]
objs["loss"] = first_loss_save
all_objs.append(objs)
all_objs = pd.concat(all_objs, axis=0).reset_index(drop=True)
m = interface.compute_metrics(pd.DataFrame(all_objs))
m["penalty_ewc"] = np.mean(keep_penalties)
if verbose:
print(f"Train, {m}")
stats_finetuning["train"][epoch] = {
"all": None,
"correct": None,
"cluster": m,
}
stats_finetuning["val"][epoch] = {
"all": None,
"correct": None,
"cluster": None,
}
still_ok, correct_cluster_perf, m_correct = check_correct_cluster_perf(cfg, objs_info_old, interface, model)
stats_finetuning["val"][epoch]["correct"] = m_correct
preserve_correct_ok = still_ok
# Val : -----------------------------------------------------------------------------------------------------------------
torch.set_grad_enabled(False)
model.eval()
all_objs = []
sliced_dataloader_val = islice(dataloader_val, int(max_iter_mult_factor*cfg["max_steps_per_epoch"]))
for i_batch, sample in tqdm(enumerate(sliced_dataloader_val), total=len(dataloader_val)):
loss, preds = interface.forward_loss(sample, grad_pos=None, output_preds=True)
loss = loss.cpu().detach().numpy() # Note that the val-set loss is computed differently for object detection since grad_pos=None is used here
# Get the (new) objects for computing the metrics
objs = interface.get_objects(preds, sample) # List of dicts
objs = pd.DataFrame(objs)
# Identify the cluster for each object (-2 means new object, -1 correct cluster)
objs = interface.identify_cluster(objs, objs_info_old, frame_name=sample[example_key][0])
# Filter objects to only keep objects belonging to the current cluster
if cluster_id == "all":
objs = objs[objs["cluster"] >= 0]
else:
objs = objs[objs["cluster"] == cluster_id]
objs["loss"] = loss
all_objs.append(objs)
all_objs = pd.concat(all_objs, axis=0).reset_index(drop=True)
m = interface.compute_metrics(pd.DataFrame(all_objs))
if verbose:
print(f"Val, {m}")
validation_value = m[val_metric]
stats_finetuning["val"][epoch]["cluster"] = m
if (validation_value > best_val or best_val < 0) and preserve_correct_ok:
best_val = validation_value
epoch_last_inc_val = epoch
if verbose:
print(f"New best val !!!! -> {validation_value}")
print(f"Saving model to {filename_best_val_model}")
torch.save(model.state_dict(), filename_best_val_model)
if "how_many_steps_to" in cfg:
for thresh in how_many_steps_to.keys():
if validation_value > thresh:
how_many_steps_to[thresh] = epoch * max_iter_mult_factor * cfg["max_steps_per_epoch"]
write_json(filename_how_many_steps_to, how_many_steps_to)
write_json_np_ok(filename_stats_finetuning, stats_finetuning)
if epoch - epoch_last_inc_val > cfg["max_wait_epoch_inc"]:
break
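# Standalone sketch of the EWC penalty applied in the training loop above
# (assumes fisher_dict / optpar_dict are keyed by task and parameter name,
# exactly as built by get_fisher_and_opt_params below):
#   penalty = lambda * sum over tasks and parameters of F_i * (theta_i - theta_i*)^2
def ewc_penalty_sketch(model, fisher_dict, optpar_dict, previous_tasks, ewc_lambda):
    penalty = 0.0
    for task in previous_tasks:
        for name, param in model.named_parameters():
            fisher = fisher_dict[task][name]   # diagonal Fisher estimate for this task
            optpar = optpar_dict[task][name]   # consolidated parameter value for this task
            penalty = penalty + (fisher * (optpar - param).pow(2)).sum() * ewc_lambda
    return penalty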
# EWC specific -----------------------------------------------------------------------------------------------
def get_info_df(objs_info_old, cluster_id):
if cluster_id == "all":
# Select all objects which are non -1 (non-mistakes)
objs_info_old_f = objs_info_old[objs_info_old["cluster"]!=-1]
train_info = objs_info_old_f[(objs_info_old_f["split_id"]==0)]
val_info = objs_info_old_f[(objs_info_old_f["split_id"]==1)]
test_info = None
elif cluster_id == "correct":
# Select all objects which are -1 or -2
objs_info_old_f = objs_info_old[objs_info_old["cluster"]<0]
train_info = objs_info_old_f[(objs_info_old_f["split_id"]==0)]
val_info = objs_info_old_f[(objs_info_old_f["split_id"]==1)]
test_info = objs_info_old_f[(objs_info_old_f["split_id"]==2)]
elif isinstance(cluster_id, int):
objs_info_old_f = objs_info_old[objs_info_old["cluster"]==cluster_id]
train_info = objs_info_old_f[(objs_info_old_f["split_id"]==0)]
val_info = objs_info_old_f[(objs_info_old_f["split_id"]==1)]
test_info = None
else:
raise NotImplementedError()
return train_info, val_info, test_info
def get_fisher_and_opt_params(cfg, fisher_dict, optpar_dict, objs_info_old, interface, model, cluster_id):
train_info, val_info, test_info = get_info_df(objs_info_old, cluster_id)
if "use_tns_list" in cfg:
raise NotImplementedError()
# Get the dataset
example_key = interface.get_example_key()
dataloader_train = interface.get_dataloader(specific_frames=train_info[example_key].unique().tolist())
optimizer = torch.optim.Adam(model.parameters(), lr=cfg["learning_rate"])
torch.set_grad_enabled(True)
model.train()
all_objs = []
sliced_dataloader_train = islice(dataloader_train, cfg["n_samples"])
count = 0
for i_batch, sample in tqdm(enumerate(sliced_dataloader_train), total=len(dataloader_train)):
optimizer.zero_grad()
# Get the list of objects on this frame that belong to the same cluster and train split
example_key = interface.get_example_key()
this_cluster_objs = train_info[train_info[example_key]==sample[example_key][0] ]
for idx, train_this in this_cluster_objs.iterrows():
if interface.requires_grad_pos():
grad_pos = (train_this["r"], train_this["t"])
else:
grad_pos = None
loss, preds = interface.forward_loss(sample, grad_pos=grad_pos, output_preds=True)
loss.backward()
count += 1
if cluster_id not in optpar_dict:
optpar_dict[cluster_id] = {}
fisher_dict[cluster_id] = {}
for name, param in model.named_parameters():
if name not in optpar_dict[cluster_id]:
optpar_dict[cluster_id][name] = param.data.clone() # Only once (all the same)
fisher_dict[cluster_id][name] = param.grad.data.clone().pow(2)
else:
fisher_dict[cluster_id][name] += param.grad.data.clone().pow(2)
# Divide the sum for average (expected value)
for name, param in model.named_parameters():
fisher_dict[cluster_id][name] /= count
print(f"Fishier {name}", fisher_dict[cluster_id][name].mean())
# ----------------------------------------------------------------------------------------
def check_correct_cluster_perf(cfg, objs_info_old, interface, model):
if not "check_corrrect_use" in cfg or cfg["check_corrrect_use"] is False:
return True, None, None
if "override_val_metric" in cfg:
val_metric = cfg["override_val_metric"]
else:
val_metric = interface.validation_metric()
cluster_id = "correct"
train_info, val_info, test_info = get_info_df(objs_info_old, cluster_id)
# Get the dataset
example_key = interface.get_example_key()
specific_frames_correct_val = val_info[example_key].unique().tolist()
if "use_tns_list" in cfg:
specific_frames_correct_val += read_json(cfg["use_tns_list"])["val"]
dataloader_val = interface.get_dataloader(specific_frames=specific_frames_correct_val)
torch.set_grad_enabled(False)
model.eval()
all_objs = []
sliced_dataloader_val = islice(dataloader_val, cfg["check_correct_n_samples"])
for i_batch, sample in tqdm(enumerate(sliced_dataloader_val), total=len(dataloader_val)):
loss, preds = interface.forward_loss(sample, grad_pos=None, output_preds=True)
loss = loss.cpu().detach().numpy() # Note that the val-set loss is computed differently for object detection since grad_pos=None is used here
# Get the (new) objects for computing the metrics
objs = interface.get_objects(preds, sample) # List of dicts
objs =
|
pd.DataFrame(objs)
|
pandas.DataFrame
|
# License: Apache-2.0
import databricks.koalas as ks
import pandas as pd
import numpy as np
import pytest
from pandas.testing import assert_frame_equal
from gators.imputers.numerics_imputer import NumericsImputer
from gators.imputers.int_imputer import IntImputer
from gators.imputers.float_imputer import FloatImputer
from gators.imputers.object_imputer import ObjectImputer
ks.set_option('compute.default_index_type', 'distributed-sequence')
@pytest.fixture()
def data():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
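# Orientation sketch only (plain pandas, *not* the gators implementation):
# the expected frames above correspond to these fillna operations.
def _pandas_impute_sketch(X_int, X_float, X_object):
    int_filled = X_int.fillna(-9.0)                # 'constant' strategy, value=-9
    float_filled = X_float.fillna(X_float.mean())  # 'mean' strategy
    object_filled = X_object.fillna('MISSING')     # 'constant' strategy, value='MISSING'
    return int_filled, float_filled, object_filled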
@pytest.fixture()
def data_num():
X_int = pd.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing():
X_int = pd.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.copy(),
'float': X_float.copy(),
'object': X_object.copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = pd.concat([X_int, X_float, X_object], axis=1)
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
@pytest.fixture()
def data_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', None], 'F': ['a', 'a', 's', np.nan]})
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]})
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]})
X_object_expected = pd.DataFrame(
{'E': ['q', 'w', 'w', 'MISSING'], 'F': ['a', 'a', 's', 'MISSING']})
X_int_ks = ks.from_pandas(X_int)
X_float_ks = ks.from_pandas(X_float)
X_object_ks = ks.from_pandas(X_object)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int_ks,
'float': X_float_ks,
'object': X_object_ks,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
'object': X_object_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_num_ks():
X_int = ks.DataFrame(
{'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]},
dtype=np.float32)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]},
dtype=np.float32)
X_int_expected = pd.DataFrame(
{'A': [0., 1., 1., -9.], 'B': [3., 4., 4., -9.]},
dtype=np.float32)
X_float_expected = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, 1.1], 'D': [2.1, 3.1, 4.1, 3.1]},
dtype=np.float32)
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
X_dict = {
'int': X_int,
'float': X_float,
}
X_expected_dict = {
'int': X_int_expected,
'float': X_float_expected,
}
objs_dict = {
'int': obj_int,
'float': obj_float,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture()
def data_no_missing_ks():
X_int = ks.DataFrame({'A': [0, 1, 1, 8], 'B': [3, 4, 4, 8]}, dtype=int)
X_float = ks.DataFrame(
{'C': [0.1, 1.1, 2.1, 9.], 'D': [2.1, 3.1, 4.1, 9.]})
X_object = ks.DataFrame(
{'E': ['q', 'w', 'w', 'x'], 'F': ['a', 'a', 's', 'x']})
obj_int = IntImputer(strategy='constant', value=-9).fit(X_int)
obj_float = FloatImputer(strategy='mean').fit(X_float)
obj_object = ObjectImputer(
strategy='constant', value='MISSING').fit(X_object)
X_dict = {
'int': X_int,
'float': X_float,
'object': X_object,
}
X_expected_dict = {
'int': X_int.to_pandas().copy(),
'float': X_float.to_pandas().copy(),
'object': X_object.to_pandas().copy(),
}
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X_dict, X_expected_dict
@pytest.fixture
def data_full_ks():
X_int = pd.DataFrame({'A': [0, 1, 1, np.nan], 'B': [3, 4, 4, np.nan]})
X_float = pd.DataFrame(
{'C': [0.1, 1.1, 2.1, np.nan], 'D': [2.1, 3.1, 4.1, np.nan]})
X_object = pd.DataFrame(
{'E': ['q', 'w', 'w', np.nan], 'F': ['a', 'a', 's', None]})
X = ks.from_pandas(pd.concat([X_int, X_float, X_object], axis=1))
X_expected = pd.DataFrame(
[[0.0, 3.0, 0.1, 2.1, 'q', 'a'],
[1.0, 4.0, 1.1, 3.1, 'w', 'a'],
[1.0, 4.0, 2.1, 4.1, 'w', 's'],
[-9.0, -9.0, 1.1, 3.1, 'w', 'a']],
columns=['A', 'B', 'C', 'D', 'E', 'F'],
)
obj_int = IntImputer(strategy='constant', value=-9).fit(X)
obj_float = FloatImputer(strategy='median').fit(X)
obj_object = ObjectImputer(strategy='most_frequent').fit(X)
objs_dict = {
'int': obj_int,
'float': obj_float,
'object': obj_object,
}
return objs_dict, X, X_expected
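# Usage assumption (not an asserted gators contract): the `data_full` fixtures
# fit all three imputers on the same mixed frame, so X_expected corresponds to
# applying the transforms in sequence, e.g.
#     X_out = objs_dict['object'].transform(
#         objs_dict['float'].transform(
#             objs_dict['int'].transform(X)))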
def test_int_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']), X_expected_dict['int'],
)
def test_float_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['float'].transform(
X_dict['float']), X_expected_dict['float'],
)
def test_object_pd(data):
objs_dict, X_dict, X_expected_dict = data
assert_frame_equal(
objs_dict['object'].transform(
X_dict['object']), X_expected_dict['object'],
)
@pytest.mark.koalas
def test_int_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['int'].transform(X_dict['int']).to_pandas(),
X_expected_dict['int'],)
@pytest.mark.koalas
def test_float_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['float'].transform(X_dict['float']).to_pandas(),
X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
assert_frame_equal(
objs_dict['object'].transform(X_dict['object']).to_pandas(),
X_expected_dict['object'],
)
def test_int_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
def test_float_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['float'].transform_numpy(X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
def test_object_pd_np(data):
objs_dict, X_dict, X_expected_dict = data
X_new_np = objs_dict['object'].transform_numpy(X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
assert_frame_equal(X_new, X_expected_dict['object'])
@pytest.mark.koalas
def test_int_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['int'].transform_numpy(X_dict['int'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['int'].columns)
assert_frame_equal(X_new, X_expected_dict['int'])
@pytest.mark.koalas
def test_float_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['float'].transform_numpy(
X_dict['float'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['float'].columns)
assert_frame_equal(X_new, X_expected_dict['float'])
@pytest.mark.koalas
def test_object_ks_np(data_ks):
objs_dict, X_dict, X_expected_dict = data_ks
X_new_np = objs_dict['object'].transform_numpy(
X_dict['object'].to_numpy())
X_new = pd.DataFrame(X_new_np, columns=X_dict['object'].columns)
|
assert_frame_equal(X_new, X_expected_dict['object'])
|
pandas.testing.assert_frame_equal
|
#!/usr/bin/env python
# coding: utf-8
import os
import pandas as pd
import numpy as np
from datetime import datetime
#####
# Helper Functions
#####
###
# Objective: Get all the CSV files, including those within subfolders
# input: root folder location
# output: list of CSV file paths under the given folder
def getCSVfile(foldername):
csvFile = []
for i, fname in enumerate(sorted(os.listdir(foldername))):
if fname.endswith(".csv"):
csvFile.append(foldername+"/"+fname)
elif (os.path.isdir(foldername+"/"+fname)):
csvFile += getCSVfile(foldername+"/"+fname)
return csvFile
###
# Objective: Read a .csv file and standardize the column names (lower-cased, to avoid mismatches between files)
def readCSVfile(filename):
df = pd.read_csv(filename)
df.columns = [c.lower() for c in df.columns]
return df
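# Usage sketch for the helpers above (the root path is hypothetical):
#     for path in getCSVfile('/data/nyctaxi'):
#         frame = readCSVfile(path)  # columns are lower-cased for consistency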
# Global variable
folder = 'E:/Henry/nyctaxi/'
zone_fname = 'taxi_zone_lookup.csv'
# list of data files
year_arr = [2017,2018,2019]
taxi_arr = ['fhvhv','fhv','green','yellow']
hour = 24
public_holiday_arr = ['02-01-2017', '16-01-2017', '13-02-2017', '20-02-2017', '29-05-2017',]
# list of the variable names for the columns of interest
column_arr = ['orig_STR', 'dest_STR', 'pick_datetime_STR', 'drop_datetime_STR', 'passenger_count_STR']
# Get zone information
zone_df = pd.read_csv(folder+zone_fname, index_col=0)
# Remove unknown zone
zone_df = zone_df[zone_df['Borough'] != 'Unknown']
filter_zone_index = len(zone_df.index)
# Get nyctaxi information
files = getCSVfile(folder+'data')
# Variables to keep track of removed samples
samples_removed_dict = {}
# Read taxi data CSV files
for i,f in enumerate(files):
# Default variable
# column name for origin pickup location
orig_STR = 'pulocationid'
# column name for destination dropoff location
dest_STR = 'dolocationid'
# column name for pickup date time
pick_datetime_STR = 'pickup_datetime'
# column name for dropoff date time
drop_datetime_STR = 'dropoff_datetime'
# column name for passenger count
passenger_count_STR = 'passenger_count'
try:
print('Working on file: ', f)
fname = f.split('_')[-1].split('-')
_year = int(fname[0])
_month = fname[1].replace('.csv','')
_taxi = ''
for t in taxi_arr:
if t in f:
_taxi = t
break
#print(_year,_month,_taxi)
df = readCSVfile(f)
total_samples = len(df.index)
print('Total sample size: ', total_samples)
###
# Get the right column naming for pickup and dropoff datetime
for c in df.columns:
if pick_datetime_STR in c:
pick_datetime_STR = c
if drop_datetime_STR in c:
drop_datetime_STR = c
# check if the passenger_count column exists:
if passenger_count_STR not in df.columns:
df[passenger_count_STR] = 1
###
# Change the date-time columns from object to datetime
df[pick_datetime_STR] = pd.to_datetime(df[pick_datetime_STR], format='%Y%m%d %H:%M:%S', errors='coerce')
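# errors='coerce' turns unparseable timestamps into NaT instead of raising,
# so malformed rows can be dropped or handled afterwards.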
df[drop_datetime_STR] =
|
pd.to_datetime(df[drop_datetime_STR], format='%Y%m%d %H:%M:%S', errors='coerce')
|
pandas.to_datetime
|