| prompt (stringlengths 19-1.03M) | completion (stringlengths 4-2.12k) | api (stringlengths 8-90) |
| --- | --- | --- |
'''Some helper functions for PyTorch, including:
    - get_mean_and_std: calculate the mean and std value of a dataset.
    - init_params: net parameter initialization.
    - Logger / AverageMeter / RecorderMeter: training bookkeeping and plotting.
'''
from __future__ import print_function, absolute_import
import errno
import time
import numpy as np
import matplotlib
import random
import torch
import torch.nn as nn
import torch.nn.init as init
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import datetime
import pandas as pd
import torch.nn.parallel
from sklearn.utils import shuffle
from sklearn.model_selection import StratifiedKFold
from torch.autograd import Variable
from torch.utils.data import TensorDataset
from torchvision.transforms import *
import nnmodels as nnmodels
from os import listdir
import sys
# __all__ = ['Logger', 'LoggerMonitor', 'savefig']
# __all__ = ['get_mean_and_std', 'init_params', 'mkdir_p', 'AverageMeter', 'accuracy']
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
def savefig(fname, dpi=None):
dpi = 500 if dpi == None else dpi
plt.savefig(fname, dpi=dpi)
def plot_overlap(logger, names=None):
names = logger.names if names == None else names
numbers = logger.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
if name in ['Train Acc.', 'Valid Acc.']:
plt.plot(x, 100 - np.asarray(numbers[name], dtype='float'))
else:
plt.plot(x, np.asarray(numbers[name]))
return [logger.title + '(' + name + ')' for name in names]
class Logger(object):
'''Save training process to log file with simple plot function.'''
def __init__(self, fpath, title=None, resume=False):
self.file = None
self.resume = resume
self.title = '' if title == None else title
if fpath is not None:
if resume:
self.file = open(fpath, 'r')
name = self.file.readline()
self.names = name.rstrip().split('\t')
self.numbers = {}
for _, name in enumerate(self.names):
self.numbers[name] = []
for numbers in self.file:
numbers = numbers.rstrip().split('\t')
for i in range(0, len(numbers)):
self.numbers[self.names[i]].append(numbers[i])
self.file.close()
self.file = open(fpath, 'a')
else:
self.file = open(fpath, 'w')
def set_names(self, names):
if self.resume:
pass
# initialize numbers as empty list
self.numbers = {}
self.names = names
for _, name in enumerate(self.names):
self.file.write(name)
self.file.write('\t')
self.numbers[name] = []
self.file.write('\n')
self.file.flush()
def append(self, numbers):
assert len(self.names) == len(numbers), 'Numbers do not match names'
for index, num in enumerate(numbers):
self.file.write("{0:.6f}".format(num))
self.file.write('\t')
self.numbers[self.names[index]].append(num)
self.file.write('\n')
self.file.flush()
def plot(self, names=None):
names = self.names if names == None else names
numbers = self.numbers
for _, name in enumerate(names):
x = np.arange(len(numbers[name]))
plt.plot(x, np.asarray(numbers[name]))
plt.legend([self.title + '(' + name + ')' for name in names])
plt.grid(True)
def close(self):
if self.file is not None:
self.file.close()
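# Illustrative usage sketch (not part of the original utilities): a typical Logger
# lifecycle during training. The metric names and values below are made up.
def _example_logger_usage(fpath='example_log.txt'):
    logger = Logger(fpath, title='example-run')
    logger.set_names(['Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
    for epoch in range(3):
        # replace these placeholder numbers with real metrics from train/validate steps
        logger.append([1.0 / (epoch + 1), 1.2 / (epoch + 1), 50.0 + epoch, 48.0 + epoch])
    logger.plot()
    savefig('example_log.png')
    logger.close()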
class LoggerMonitor(object):
'''Load and visualize multiple logs.'''
def __init__(self, paths):
        '''paths is a dictionary with {name: filepath} pairs'''
self.loggers = []
for title, path in paths.items():
logger = Logger(path, title=title, resume=True)
self.loggers.append(logger)
def plot(self, names=None):
plt.figure()
plt.plot()
legend_text = []
for logger in self.loggers:
legend_text += plot_overlap(logger, names)
        # hard-coded labels for a specific figure; they override the legend entries collected above
        legend_text = ['WRN-28-10+Ours (error 17.65%)', 'WRN-28-10 (error 18.68%)']
plt.legend(legend_text, loc=0)
plt.ylabel('test error (%)')
plt.xlabel('epoch')
plt.grid(True)
def time_string():
ISOTIMEFORMAT = '%Y-%m-%d %X'
string = '[{}]'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string
def get_mean_and_std(dataset):
'''Compute the mean and std value of dataset.'''
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
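# Illustrative sketch: the statistics above are typically fed to a Normalize transform.
# `some_dataset` is a placeholder for any dataset yielding 3-channel image tensors.
# mean, std = get_mean_and_std(some_dataset)
# normalize = Normalize(mean.tolist(), std.tolist())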
def init_params(net):
'''Init layer parameters.'''
for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
def mkdir_p(path):
'''make dir if not exist'''
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
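# Illustrative sketch (not part of the original utilities): AverageMeter keeps a
# running mean, e.g. of per-batch losses weighted by batch size via the n argument.
def _example_average_meter_usage(batch_losses, batch_sizes):
    meter = AverageMeter()
    for loss, n in zip(batch_losses, batch_sizes):
        meter.update(float(loss), n=int(n))
    return meter.avg  # equals sum(loss * n) / sum(n)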
class TrainningValidationSplitDataset(torch.utils.data.Dataset):
def __init__(self, full_ds, offset, length):
self.full_ds = full_ds
self.offset = offset
self.length = length
        assert len(full_ds) >= offset + length, "Parent Dataset not long enough"
super(TrainningValidationSplitDataset, self).__init__()
def __len__(self):
return self.length
def __getitem__(self, i):
return self.full_ds[i + self.offset]
def trainTestSplit(dataset, val_share):
val_offset = int(len(dataset) * (1 - val_share))
# print("Offest:" + str(val_offset))
return TrainningValidationSplitDataset(dataset, 0, val_offset), TrainningValidationSplitDataset(dataset, val_offset,
len(dataset) - val_offset)
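# Illustrative sketch: splitting an existing dataset into train/validation views
# that index into the parent dataset (no data is copied).
# train_ds, val_ds = trainTestSplit(full_dataset, val_share=0.2)
# assert len(train_ds) + len(val_ds) == len(full_dataset)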
def createNewDir(BASE_FOLDER):
parquet_dir = os.path.join(BASE_FOLDER, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
os.makedirs(parquet_dir)
return parquet_dir
def savePred(df_pred, local_model, val_score, train_score, save_path):
    pre = save_path + '/pth/'
if not os.path.isdir(pre):
os.makedirs(pre)
fName = pre + str(val_score) + '_' + str(train_score)
torch.save(local_model.state_dict(), fName + '_cnn.pth')
csv_path = str(fName + '_submission.csv')
df_pred.to_csv(csv_path, columns=('id', 'is_iceberg'), index=None)
print(csv_path)
def MinMaxBestBaseStacking(input_folder, best_base, output_path):
sub_base = pd.read_csv(best_base)
all_files = os.listdir(input_folder)
# Read and concatenate submissions
outs = [pd.read_csv(os.path.join(input_folder, f), index_col=0) for f in all_files]
concat_sub = pd.concat(outs, axis=1)
cols = list(map(lambda x: "is_iceberg_" + str(x), range(len(concat_sub.columns))))
concat_sub.columns = cols
concat_sub.reset_index(inplace=True)
    # get the data fields ready for stacking (note: iloc[:, 1:6] assumes five submission files)
    concat_sub['is_iceberg_max'] = concat_sub.iloc[:, 1:6].max(axis=1)
    concat_sub['is_iceberg_min'] = concat_sub.iloc[:, 1:6].min(axis=1)
    concat_sub['is_iceberg_mean'] = concat_sub.iloc[:, 1:6].mean(axis=1)
    concat_sub['is_iceberg_median'] = concat_sub.iloc[:, 1:6].median(axis=1)
    # cutoff thresholds, easy to tweak: take the max when every model agrees above
    # cutoff_lo and the min when every model agrees below cutoff_hi
    cutoff_lo = 0.67
    cutoff_hi = 0.33
concat_sub['is_iceberg_base'] = sub_base['is_iceberg']
concat_sub['is_iceberg'] = np.where(np.all(concat_sub.iloc[:, 1:6] > cutoff_lo, axis=1),
concat_sub['is_iceberg_max'],
np.where(np.all(concat_sub.iloc[:, 1:6] < cutoff_hi, axis=1),
concat_sub['is_iceberg_min'],
concat_sub['is_iceberg_base']))
concat_sub[['id', 'is_iceberg']].to_csv(output_path,
index=False, float_format='%.12f')
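# Illustrative sketch (hypothetical paths): stack five submissions against a base one.
# Per row: if every model is above cutoff_lo the maximum is kept, if every model is
# below cutoff_hi the minimum is kept, otherwise the base submission's value is used.
# MinMaxBestBaseStacking('./submissions', './best_single_model.csv', './stacked.csv')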
def ensembleVer2(input_folder, output_path):
print('Out:' + output_path)
csv_files = [f for f in os.listdir(input_folder) if f.endswith('.csv')]
model_scores = []
for i, csv in enumerate(csv_files):
df = pd.read_csv(os.path.join(input_folder, csv), index_col=0)
if i == 0:
index = df.index
else:
assert index.equals(df.index), "Indices of one or more files do not match!"
model_scores.append(df)
print("Read %d files. Averaging..." % len(model_scores))
# print(model_scores)
concat_scores = pd.concat(model_scores)
print(concat_scores.head())
concat_scores['is_iceberg'] = concat_scores['is_iceberg'].astype(np.float32)
averaged_scores = concat_scores.groupby(level=0).mean()
assert averaged_scores.shape[0] == len(list(index)), "Something went wrong when concatenating/averaging!"
averaged_scores = averaged_scores.reindex(index)
stacked_1 = pd.read_csv('statoil-submission-template.csv') # for the header
print(stacked_1.shape)
sub = pd.DataFrame()
sub['id'] = stacked_1['id']
sub['is_iceberg'] = np.exp(np.mean(
[
averaged_scores['is_iceberg'].apply(lambda x: np.log(x))
], axis=0))
print(sub.shape)
sub.to_csv(output_path, index=False, float_format='%.9f')
print("Averaged scores saved to %s" % output_path)
# Convert the np arrays into the correct dimension and type.
# Note that BCELoss requires Float in X as well as in y.
def XnumpyToTensor(x_data_np, args):
x_data_np = np.array(x_data_np, dtype=np.float32)
if args.use_cuda:
X_tensor = (torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor = (torch.from_numpy(x_data_np)) # Note the conversion for pytorch
return X_tensor
# Convert the np arrays into the correct dimension and type.
# Note that BCELoss requires Float in X as well as in y.
def YnumpyToTensor(y_data_np, args):
y_data_np = y_data_np.reshape((y_data_np.shape[0], 1)) # Must be reshaped for PyTorch!
if args.use_cuda:
# Y = Variable(torch.from_numpy(y_data_np).type(torch.LongTensor).cuda())
Y_tensor = (torch.from_numpy(y_data_np)).type(torch.FloatTensor).cuda() # BCEloss requires Float
else:
# Y = Variable(torch.squeeze (torch.from_numpy(y_data_np).type(torch.LongTensor))) #
Y_tensor = (torch.from_numpy(y_data_np)).type(torch.FloatTensor) # BCEloss requires Float
return Y_tensor
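# Illustrative sketch: pairing the converted tensors with BCELoss, which expects
# float tensors on both sides. `model`, `x_train_np` and `y_train_np` are placeholders.
# X_tensor = XnumpyToTensor(x_train_np, args)
# Y_tensor = YnumpyToTensor(y_train_np, args)
# loss = nn.BCELoss()(model(Variable(X_tensor)), Variable(Y_tensor))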
class RecorderMeter(object):
"""Computes and stores the minimum loss value and its epoch index"""
def __init__(self, total_epoch):
self.reset(total_epoch)
def reset(self, total_epoch):
assert total_epoch > 0
self.total_epoch = total_epoch
self.current_epoch = 0
        self.epoch_losses = np.zeros((self.total_epoch, 2), dtype=np.float32)  # [epoch, train/val]
        self.epoch_losses = self.epoch_losses - 1  # initialise to -1 so unfilled epochs are obvious
        self.epoch_accuracy = np.zeros((self.total_epoch, 2), dtype=np.float32)  # [epoch, train/val]
def update(self, idx, train_loss, train_acc, val_loss, val_acc):
        assert idx >= 0 and idx < self.total_epoch, 'epoch index {} is out of range for total_epoch {}'.format(
            idx, self.total_epoch)
self.epoch_losses[idx, 0] = train_loss
self.epoch_losses[idx, 1] = val_loss
self.epoch_accuracy[idx, 0] = train_acc
self.epoch_accuracy[idx, 1] = val_acc
self.current_epoch = idx + 1
return self.max_accuracy(False) == val_acc
def max_accuracy(self, istrain):
if self.current_epoch <= 0: return 0
if istrain:
return self.epoch_accuracy[:self.current_epoch, 0].max()
else:
return self.epoch_accuracy[:self.current_epoch, 1].max()
def plot_curve(self, save_path, args, model):
title = 'PyTorch-Ensembler:' + str((type(model).__name__)).upper() + ',LR:' + str(args.lr) + ',DataSet:' + str(args.dataset).upper() + ',' + '\n'\
+ ',Params: %.2fM' % (sum(p.numel() for p in model.parameters()) / 1000000.0) + ',Seed: %.2f' % args.manualSeed + \
",Torch: {}".format(torch.__version__) + ", Batch:{}".format(args.batch_size)
dpi = 80
width, height = 1200, 800
legend_fontsize = 14
scale_distance = 48.8
figsize = width / float(dpi), height / float(dpi)
fig = plt.figure(figsize=figsize)
x_axis = np.array([i for i in range(self.total_epoch)]) # epochs
y_axis = np.zeros(self.total_epoch)
plt.xlim(0, self.total_epoch)
plt.ylim(0, 1.0)
interval_y = 0.05 / 3.0
interval_x = 1
plt.xticks(np.arange(0, self.total_epoch + interval_x, interval_x))
plt.yticks(np.arange(0, 1.0 + interval_y, interval_y))
plt.grid()
plt.title(title, fontsize=18)
plt.xlabel('EPOCH', fontsize=16)
plt.ylabel('LOSS/ACC', fontsize=16)
y_axis[:] = self.epoch_accuracy[:, 0] / 100.0
plt.plot(x_axis, y_axis, color='g', linestyle='-', label='tr-accuracy/100', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_accuracy[:, 1] / 100.0
plt.plot(x_axis, y_axis, color='y', linestyle='-', label='val-accuracy/100', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 0]
plt.plot(x_axis, y_axis, color='r', linestyle=':', label='tr-loss', lw=2)
plt.legend(loc=4, fontsize=legend_fontsize)
y_axis[:] = self.epoch_losses[:, 1]
plt.plot(x_axis, y_axis, color='b', linestyle=':', label='val-loss', lw=4)
plt.legend(loc=4, fontsize=legend_fontsize)
if save_path is not None:
fig.savefig(save_path, dpi=dpi, bbox_inches='tight')
# print('---- save figure {} into {}'.format(title, save_path))
plt.close(fig)
def set_optimizer_lr(optimizer, lr):
# callback to set the learning rate in an optimizer, without rebuilding the whole optimizer
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
import math
# https://github.com/gngdb/pytorch-cifar-sgdr/blob/master/main.py
def sgdr(period, batch_idx):
# returns normalised anytime sgdr schedule given period and batch_idx
# best performing settings reported in paper are T_0 = 10, T_mult=2
# so always use T_mult=2
batch_idx = float(batch_idx)
restart_period = period
while batch_idx / restart_period > 1.:
batch_idx = batch_idx - restart_period
restart_period = restart_period * 2.
radians = math.pi * (batch_idx / restart_period)
return 0.5 * (1.0 + math.cos(radians))
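# Illustrative sketch: applying the SGDR multiplier per batch with the helpers above.
# `base_lr`, `batches_per_epoch` and `global_step` are assumptions, with T_0 = 10 epochs.
def _example_sgdr_step(optimizer, base_lr, batches_per_epoch, global_step):
    period = 10 * batches_per_epoch           # T_0 = 10 epochs, T_mult = 2 (see sgdr above)
    lr = base_lr * sgdr(period, global_step)  # cosine-annealed multiplier in [0, 1]
    return set_optimizer_lr(optimizer, lr)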
# def adjust_learning_rate(optimizer, epoch):
# global lr
# """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
# lr = lr * (0.01 ** (epoch // 10))
# for param_group in optimizer.state_dict()['param_groups']:
# param_group['lr'] = lr
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 after 20 and 40 and 60 epochs"""
# global lr
lr = args.lr * (0.5 ** (epoch // 33)) * (0.5 ** (epoch // 20)) * (0.5 ** (epoch // 55))
print ('adjust_learning_rate: {} '.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def fixSeed(args):
random.seed(args.manualSeed)
np.random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if args.use_cuda:
torch.cuda.manual_seed(args.manualSeed)
torch.cuda.manual_seed_all(args.manualSeed)
def getStatoilTrainValLoaders(args,n_folds=5,current_fold=0):
fixSeed(args)
local_data = pd.read_json(args.data_path + '/train.json')
    skf = StratifiedKFold(n_splits=n_folds,random_state=2018)  # note: random_state only has an effect when shuffle=True
x=local_data['id'].values
y=local_data['is_iceberg'].values
for i,(train_ind,val_ind) in enumerate(skf.split(X=x,y=y)):
if i<current_fold:
pass
else:
            tr_data = local_data.iloc[train_ind,:].copy()
            val_data = local_data.iloc[val_ind,:].copy()
break
# local_data = shuffle(local_data) # otherwise same validation set each time!
# local_data = local_data.reindex(np.random.permutation(local_data.index))
tr_data['band_1'] = tr_data['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
tr_data['band_2'] = tr_data['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
tr_data['inc_angle'] = pd.to_numeric(tr_data['inc_angle'], errors='coerce')
tr_data['inc_angle'].fillna(0, inplace=True)
val_data['band_1'] = val_data['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
val_data['band_2'] = val_data['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
val_data['inc_angle'] = pd.to_numeric(val_data['inc_angle'], errors='coerce')
val_data['inc_angle'].fillna(0, inplace=True)
band_1_tr = np.concatenate([im for im in tr_data['band_1']]).reshape(-1, 75, 75)
band_2_tr = np.concatenate([im for im in tr_data['band_2']]).reshape(-1, 75, 75)
#band_3_tr = (band_1_tr+band_2_tr)/2
local_full_img_tr = np.stack([band_1_tr, band_2_tr], axis=1)#,band_3_tr], axis=1)
band_1_val = np.concatenate([im for im in val_data['band_1']]).reshape(-1, 75, 75)
band_2_val = np.concatenate([im for im in val_data['band_2']]).reshape(-1, 75, 75)
#band_3_val = (band_1_val+band_2_val)/2
local_full_img_val = np.stack([band_1_val, band_2_val], axis=1)#,band_3_val], axis=1)
train_imgs = XnumpyToTensor(local_full_img_tr, args)
train_targets = YnumpyToTensor(tr_data['is_iceberg'].values, args)
dset_train = TensorDataset(train_imgs, train_targets)
val_imgs = XnumpyToTensor(local_full_img_val, args)
val_targets = YnumpyToTensor(val_data['is_iceberg'].values, args)
dset_val = TensorDataset(val_imgs, val_targets)
# local_train_ds, local_val_ds = trainTestSplit(dset_train, args.validationRatio)
local_train_ds, local_val_ds = dset_train, dset_val
local_train_loader = torch.utils.data.DataLoader(local_train_ds, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
local_val_loader = torch.utils.data.DataLoader(local_val_ds, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
return local_train_loader, local_val_loader, local_train_ds, local_val_ds
def selectModel(args, m):
model = None
print("==> Creating model '{}'".format(m))
if m.startswith('senet'): # block, n_size=1, num_classes=1, num_rgb=2, base=32
model = nnmodels.senetXX_generic(args.num_classes, args.imgDim, args.base_factor)
# model = nnmodels.senet32_RG_1_classes(args.num_classes, args.imgDim)
        args.batch_size = 4
args.epochs = 250
args.lr = 0.0007 # do not change !!! optimal for the Statoil data set
if m.startswith('densenet'):
model = nnmodels.densnetXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 32
args.epochs = 30
args.lr = 0.05
if m.startswith('minidensenet'):
model = nnmodels.minidensnetXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 32
args.epochs = 35
args.lr = 0.005 * 2
if m.startswith('vggnet'):
model = nnmodels.vggnetXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 64
args.epochs = 88
args.lr = 0.0005
if m.startswith('resnext'):
model = nnmodels.resnetxtXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 16
args.epochs = 66
args.lr = 0.0005
if m.startswith('lenet'):
model = nnmodels.lenetXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 64
args.epochs = 88
if m.startswith('wrn'):
model = nnmodels.wrnXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 16
args.epochs = 34
args.lr = 0.0005*2
if m.startswith('simple'):
model = nnmodels.simpleXX_generic(args.num_classes, args.imgDim)
        args.batch_size = 256
args.epochs = 120
# if m.startswith('unet'):
# model = nnmodels.unetXX_generic(args.num_classes, args.imgDim)
# args.batch_size = 64
# args.batch_size = 64
# args.epochs = 50
# if m.startswith('link'):
# model = nnmodels.linknetXX_generic(args.num_classes, args.imgDim)
# args.batch_size = 64
# args.batch_size = 64
# args.epochs = 50
return model
def BinaryInferenceOofAndTest(local_model,args,n_folds = 5,current_fold=0):
if args.use_cuda:
local_model.cuda()
local_model.eval()
df_test_set = pd.read_json(args.data_path + '/test.json')
df_test_set['band_1'] = df_test_set['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['band_2'] = df_test_set['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['inc_angle'] = pd.to_numeric(df_test_set['inc_angle'], errors='coerce')
# df_test_set.head(3)
print(df_test_set.shape)
columns = ['id', 'is_iceberg']
df_pred_test = pd.DataFrame(data=np.zeros((0, len(columns))), columns=columns)
# df_pred.id.astype(int)
for index, row in df_test_set.iterrows():
        row_no_id = row.drop('id')
        band_1_test = (row_no_id['band_1']).reshape(-1, 75, 75)
        band_2_test = (row_no_id['band_2']).reshape(-1, 75, 75)
# band_3_test = (band_1_test + band_2_test) / 2
full_img_test = np.stack([band_1_test, band_2_test], axis=1)
x_data_np = np.array(full_img_test, dtype=np.float32)
if args.use_cuda:
X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
# X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors
predicted_val = (local_model(X_tensor_test).data).float() # probabilities
p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float
df_pred_test = df_pred_test.append({'id': row['id'], 'is_iceberg': p_test}, ignore_index=True)
df_val_set = pd.read_json(args.data_path + '/train.json')
    skf = StratifiedKFold(n_splits=n_folds,random_state=2018)  # note: random_state only has an effect when shuffle=True
x=df_val_set['id'].values
y=df_val_set['is_iceberg'].values
columns = ['id', 'is_iceberg']
for i,(train_ind,val_ind) in enumerate(skf.split(X=x,y=y)):
if i<current_fold:
pass
else:
            ids_and_labels = df_val_set.iloc[val_ind][['id', 'is_iceberg']]
df_val_set = df_val_set.iloc[val_ind,:]
break
df_val_set['band_1'] = df_val_set['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
df_val_set['band_2'] = df_val_set['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
df_val_set['inc_angle'] = pd.to_numeric(df_val_set['inc_angle'], errors='coerce')
# df_test_set.head(3)
print(df_val_set.shape)
columns = ['id', 'is_iceberg']
df_pred_val = pd.DataFrame(data=np.zeros((0, len(columns))), columns=columns)
# df_pred.id.astype(int)
for index, row in df_val_set.iterrows():
        row_no_id = row.drop('id')
        band_1_test = (row_no_id['band_1']).reshape(-1, 75, 75)
        band_2_test = (row_no_id['band_2']).reshape(-1, 75, 75)
# band_3_test = (band_1_test + band_2_test) / 2
full_img_test = np.stack([band_1_test, band_2_test], axis=1)
x_data_np = np.array(full_img_test, dtype=np.float32)
if args.use_cuda:
X_tensor_test = Variable(torch.from_numpy(x_data_np).cuda()) # Note the conversion for pytorch
else:
X_tensor_test = Variable(torch.from_numpy(x_data_np)) # Note the conversion for pytorch
# X_tensor_test=X_tensor_test.view(1, trainX.shape[1]) # does not work with 1d tensors
predicted_val = (local_model(X_tensor_test).data).float() # probabilities
p_test = predicted_val.cpu().numpy().item() # otherwise we get an array, we need a single float
df_pred_val = df_pred_val.append({'id': row['id'], 'is_iceberg': p_test}, ignore_index=True)
return df_pred_val, df_pred_test, ids_and_labels
def BinaryInference(local_model, args):
if args.use_cuda:
local_model.cuda()
local_model.eval()
df_test_set = pd.read_json(args.data_path + '/test.json')
df_test_set['band_1'] = df_test_set['band_1'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['band_2'] = df_test_set['band_2'].apply(lambda x: np.array(x).reshape(75, 75))
df_test_set['inc_angle'] = | pd.to_numeric(df_test_set['inc_angle'], errors='coerce') | pandas.to_numeric |
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
import pandas_datareader.data as web
pytestmark = pytest.mark.stable
class TestEcondb(object):
def test_get_cdh_e_fos(self):
# EUROSTAT
# Employed doctorate holders in non managerial and non professional
# occupations by fields of science (%)
df = web.DataReader(
"dataset=CDH_E_FOS&GEO=NO,PL,PT,RU&FOS07=FOS1&Y_GRAD=TOTAL",
"econdb",
start=pd.Timestamp("2005-01-01"),
end= | pd.Timestamp("2010-01-01") | pandas.Timestamp |
import numpy as np
#import matplotlib.pyplot as plt
import pandas as pd
import os
import math
#import beeswarm as bs
import sys
import time
import pydna
import itertools as it
import datetime
import dnaplotlib as dpl
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.patches as mpatch
from matplotlib.patches import FancyBboxPatch
from pydna.dseq import Dseq
from pydna.dseqrecord import Dseqrecord
from pydna.assembly import Assembly as pydAssembly
from Bio.Restriction import BsaI
from Bio.Restriction import BbsI
from Bio.Restriction import AarI
from Bio.Restriction import Esp3I
from copy import deepcopy as dc
import ipywidgets as widgets
from collections import defaultdict
from IPython.display import FileLink, FileLinks
import warnings
import re
def incrementString(s):
    """If s ends in an integer, increment that integer by one; otherwise append a 0."""
m = re.search(r'\d+$', s)
if(m):
return s[:m.start()]+str(int(m.group())+1)
else:
return s+str(0)
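# e.g. incrementString("part7") -> "part8"; a name without trailing digits gets a
# zero appended: incrementString("part") -> "part0"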
#the following makes a few data members for handling restriction enzymes
enzymelist = [BsaI,BbsI,AarI,Esp3I]
enzymes = {str(a):a for a in enzymelist}
enlist = [str(a) for a in enzymelist]+["gibson"]
#the following defines the overhangs in our library!
ENDDICT = { \
"GGAG":"A", \
"TACT":"B", \
"AATG":"C", \
"AGGT":"D", \
"GCTT":"E", \
"CGCT":"F", \
"TGCC":"G", \
"ACTA":"H", \
"TAGA":"sc3",\
"CATTACTCGCATCCATTCTCAGGCTGTCTCGTCTCGTCTC" : "1",\
"GCTGGGAGTTCGTAGACGGAAACAAACGCAGAATCCAAGC" : "2",\
"GCACTGAAGGTCCTCAATCGCACTGGAAACATCAAGGTCG" : "3",\
"CTGACCTCCTGCCAGCAATAGTAAGACAACACGCAAAGTC" : "4",\
"GAGCCAACTCCCTTTACAACCTCACTCAAGTCCGTTAGAG" : "5",\
"CTCGTTCGCTGCCACCTAAGAATACTCTACGGTCACATAC" : "6",\
"CAAGACGCTGGCTCTGACATTTCCGCTACTGAACTACTCG" : "7",\
"CCTCGTCTCAACCAAAGCAATCAACCCATCAACCACCTGG" : "8",\
"GTTCCTTATCATCTGGCGAATCGGACCCACAAGAGCACTG" : "9",\
"CCAGGATACATAGATTACCACAACTCCGAGCCCTTCCACC" : "X",\
}
#have a dictionary of the reverse complement too
rcENDDICT = {str(Dseq(a).rc()):ENDDICT[a] for a in ENDDICT}
prevplate = None
selenzyme = "gibson" #which enzyme to assemble everything with
chewnt = 40
frags = [] #fragments in the reaction
#the following lists the components in each well, in uL. I think this is outdated
#as of 4/25/19
gga = \
[["component","volume"],
#["buffer10x",0.4],
#["ATP10mM",0.4],
#["BsaI", 0.2],
#["ligase",0.2],
["NEBbuffer",0.4],
["NEBenzyme",0.2],
["water",1.4],
["dnasln",1],
]
gibassy = \
[["component","volume"],
["GGAMM",1],
["dnasln",1]]
ggaPD = pd.DataFrame(gga[1:],columns=gga[0]) #this just turns it into a data frame
gibassyPD = pd.DataFrame(gibassy[1:],columns=gibassy[0])
ggaFm = 6.0
ggavecGm = 6.0
gibFm = 6.0
gibvecFm = 6.0
partsFm = ggaFm #default is gga
vectorFm = ggavecGm
source = "384PP_AQ_BP"
ptypedict = {
"ASSGGA04":"384PP_PLUS_AQ_BP",
"ASSGIB01":"384LDV_PLUS_AQ_BP",
"ASSGIB02":"384PP_AQ_BP"}
waterwell = "P1" #in your source plate, include one well that is just full of water.
#dnaPath = os.path.join(".","DNA")
#go down and look at makeEchoFile
def startText():
print("Welcome to Moclo Assembly Helper V1")
print("===================================")
def pickEnzyme():
"""asks the user about what kind of enzyme s/he wants to use"""
print("Which enzyme would you like to use?")
for el in range(len(enlist)):
print("[{}] {}".format(el,enlist[el]))
print()
userpick = int(input("type the number of your favorite! "))
selenzyme = enlist[userpick].lower()
print("===================================")
return selenzyme
def findExpts(path):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = []
#print(dirlist)
#for folder in dirlist[1:]:
folder = ['.']
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if("promoter" in fline):
expts+=[(os.path.join(folder[0],fle),fle[:-4])]
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(folder[0],fle),None)
dfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
if(dfs["Sheet1"].columns[0] == "promoter"):
expts+=[(os.path.join(folder[0],fle),fle[:-5])]
except (IOError,KeyError) as e:
pass
return sorted(expts)[::-1]
def findPartsLists(path):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print dirlist
expts = []
for fle in dirlist[0][2]:
#print fle
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(path,fle),None)
dfs = {sheet_name: xl_file.parse(sheet_name)
for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
if("parts" in list(dfs.keys())[0]):
                    expts+=[(os.path.join(path,fle),fle[:-5])]
except IOError:
pass
return sorted(expts)[::-1]
def pickPartsList():
"""user interface for picking a list of parts to use. This list must
contain the concentration of each part as well as the 384 well location
of each part at minimum, but better to have more stuff. Check my example
file."""
print("Searching for compatible parts lists...")
pllist = findPartsLists(os.path.join(".","partslist"))
pickedlist = ''
if(len(pllist) <=0):
print("could not find any parts lists :(. Make sure they are in a \
seperate folder called 'partslist' in the same directory as this script")
else:
print("OK! I found")
print()
for el in range(len(pllist)):
print("[{}] {}".format(el,pllist[el][1]))
print()
if(len(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = pd.read_excel(pickedlist,None)
print("===================================")
return openlist
def pickAssembly():
"""user interface for defining assemblies to build"""
#manual = raw_input("would you like to manually enter the parts to assemble? (y/n)")
manual = "n"
if(manual == "n"):
print("searching for compatible input files...")
time.sleep(1)
pllist = findExpts(".")
#print pllist
pickedlist = ''
if(len(pllist) <=0):
print("could not find any assembly files")
else:
print("OK! I found")
print()
for el in range(len(pllist)):
print("[{}] {}".format(el,pllist[el][1]))
print()
if(len(pllist)==1):
pickedlist = pllist[0][0]
print("picked the only one in the list!")
else:
userpick = int(input("type the number of your favorite! "))
pickedlist = pllist[userpick][0]
openlist = pd.read_csv(pickedlist)
print("===================================")
return openlist,pickedlist
else:
print("sorry I haven't implemented this yet")
pickAssembly()
return pd.read_csv(aslist),aslist
def echoline(swell,dwell,tvol,sptype = source,spname = "Source[1]",\
dpname = "Destination[1]",platebc="",partid="",partname=""):
#if(platebc!=""):
# sptype = ptypedict[platebc]
return "{},{},{},{},{},{},,,{},{},{}\n".format(spname,platebc,sptype,swell,\
partid,partname,dpname,dwell,tvol)
def echoSinglePart(partDF,partname,partfm,dwell,printstuff=True,enzyme=enzymes["BsaI"]):
"""calculates how much of a single part to put in for a number of fm."""
try:
pwell = partDF[partDF.part==partname].well.iloc[0]
except IndexError:
raise ValueError("Couldn't find the right part named '"+\
partname+"'! Are you sure you're using the right parts list?")
pDseq = makeDseqFromDF(partname,partDF,enzyme=enzyme)
pconc = partDF[partDF.part==partname]["conc (nM)"]
#concentration of said part, in the source plate
if(len(pconc)<=0):
#in this case we could not find the part!
raise ValueError("Part "+part+" had an invalid concentration!"+\
" Are you sure you're using the right parts list?")
pconc = pconc.iloc[0]
pplate = partDF[partDF.part==partname]["platebc"].iloc[0]
platet = partDF[partDF.part==partname]["platetype"].iloc[0]
e1,e2 = echoPipet(partfm,pconc,pwell,dwell,sourceplate=pplate,sptype=platet,\
partname=partname,printstuff=printstuff)
return e1,e2,pDseq,pplate,platet
def echoPipet(partFm,partConc,sourcewell,destwell,sourceplate=None,\
partname="",sptype=None,printstuff=True):
"""does the calculation to convert femtomoles to volumes, and returns
the finished echo line"""
pvol = (partFm/partConc)*1000
evol = int(pvol)
if(evol <= 25):#im not sure what happens when the echo would round to 0.
#better safe than sorry and put in one droplet.
evol = 25
if(sourceplate==None):
if(printstuff):
print("===> transfer from {} to {}, {} nl".format(sourcewell,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,partname=partname)
else:
if(printstuff):
print("===> transfer from {}, plate {} to {}, {} nl".format(sourcewell,sourceplate,destwell,evol))
echostring = echoline(sourcewell,destwell,evol,spname =sourceplate,\
sptype= sptype,platebc = sourceplate,partname=partname)
return echostring, evol
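# Worked example: 6 fmol of a part at 30 nM gives (6 / 30) * 1000 = 200 nl, which the
# Echo dispenses as 25-nl droplets; requests below 25 nl are bumped up to one droplet.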
def makeDseqFromDF(part,partslist,col = "part",enzyme=enzymes["BsaI"]):
"""looks up the part named "part" in the column specified as col, and
converts it into a pydna object.
this program will check if an input sequence is a valid part.
This involves checking a couple of things:
1) are there only two restriction cut sites?
2) does it have the proper overhangs?
3) after being cut, does it produce one part with bsai sites and one part without?
"""
pseq = partslist[partslist[col] == part].sequence.iloc[0].lower()
pcirc = partslist[partslist[col] == part].circular.iloc[0]
p5pover = int(partslist[partslist[col] == part]["5pend"].iloc[0])
p3pover = int(partslist[partslist[col] == part]["3pend"].iloc[0])
povhg = int(p5pover)
pseqRC = str(Dseq(pseq).rc()).lower()
if(p5pover > 0):
pseq = pseq[p5pover:]
elif(p5pover<0):
pseqRC = pseqRC[:p5pover]
if(p3pover <0):
pseq = pseq[:p3pover]
    elif(p3pover >0):
        pseqRC = pseqRC[p3pover:]
pDseq = Dseq(pseq,pseqRC,ovhg=povhg)
#this defines a dsdna linear sequence
if(pcirc):
#this makes the sequence circular, if we have to
pDseq = pDseq.looped()
if(enzyme != None):
numzymes = len(enzyme.search(pDseq,linear=not pcirc))##\
#len(enzyme.search(pDseq.rc(),linear=pcirc))
if(numzymes < 2 and pcirc):
warnings.warn("Be careful! sequence {} has only {} {} site"\
.format(part,numzymes,str(enzyme)))
elif(numzymes>=2):
try:
testcut = pDseq.cut(enzyme)
except IndexError:
raise IndexError("something's wrong with part "+part)
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
if(numzymes > 2):
warnings.warn("{} has {} extra {} site{}!!"\
.format(part,numzymes-2,str(enzyme),'s'*((numzymes-2)>1)))
insert = []
backbone = []
for a in testcut:
fpend = a.five_prime_end()
tpend = a.three_prime_end()
if((a.find(esite)>-1) or (a.find(esiterc)>-1)):
#in this case the fragment we are looking at is the 'backbone'
backbone+=[a]
else:
#we didn't find any site sequences. this must be the insert!
insert+=[a]
if((not fpend[0]=='blunt') and \
(not ((fpend[1].upper() in ENDDICT) or \
(fpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.format(part,fpend[1].upper()))
if((not tpend[0]=='blunt') and \
(not ((tpend[1].upper() in ENDDICT) or \
(tpend[1].upper() in rcENDDICT)))):
warnings.warn("{} has non-standard overhang {}"\
.format(part,tpend[1].upper()))
if(len(insert)==0):
raise ValueError("{} does not produce any fragments with no cut site!".format(part))
if(len(insert)>1):
warnings.warn("{} produces {} fragments with no cut site".format(part,len(insert)))
if(len(backbone)>1):
dontwarn = False
if(not pcirc and len(backbone)==2):
#in this case we started with a linear thing and so we expect it
#to make two 'backbones'
dontwarn = True
if(not dontwarn):
warnings.warn("{} produces {} fragments with cut sites".format(part,len(backbone)))
return pDseq
def bluntLeft(DSseq):
"""returns true if the left hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.five_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def bluntRight(DSseq):
"""returns true if the right hand side of DSseq is blunt"""
if(type(DSseq)==Dseqrecord):
DSseq = DSseq.seq
isblunt = (DSseq.three_prime_end()[0]=='blunt')&DSseq.linear
return(isblunt)
def isNewDseq(newpart,partlist):
"""checks to see if newpart is contained within partlist, returns true
if it isn't"""
new = True
if(type(newpart)==Dseqrecord):
newdseqpart = newpart.seq
#seqnewpart = str(newpart).upper()
newcirc = newpart.circular
#dsequid = (newpart.seq).seguid()
#print("dsequid is "+str(dsequid))
#dsnewpart = Dseqrecord(newpart)
#rcnewpart = newpart.rc()
newseguid = newdseqpart.seguid()
#print("newseguid is "+str(newseguid))
cseguid = None
if(newcirc and type(newpart)==Dseqrecord):
cseguid = newpart.cseguid()
for part in partlist:
        if(isinstance(part, Dseqrecord)):
            dseqpart = part.seq
        else:
            dseqpart = part
        partseguid = dseqpart.seguid()
if(newseguid==partseguid):
new=False
break
#if(len(part) != len(newpart)):
#continue
#dspart = Dseqrecord(part)
if(newcirc and part.circular):
if(type(part) == Dseqrecord and cseguid != None):
comparid = part.cseguid()
if(comparid == cseguid):
new=False
break
#if(seqnewpart in (str(part.seq).upper()*3)):
# new=False
# break
#elif(seqnewpart in (str(part.seq.rc()).upper()*3)):
# new=False
# break
#elif(part == newpart or part == rcnewpart):
#new=False
#break
return new
def allCombDseq(partslist,resultlist = []):
'''recursively finds all possible paths through the partslist'''
if(len(partslist)==1):
#if there's only one part, then "all possible paths" is only one
return partslist
else:
#result is the final output
result = []
for p in range(len(partslist)):
newplist = dc(partslist)
#basically the idea is to take the first part,
#and stick it to the front of every other possible assembly
part = newplist.pop(p)
#this is the recursive part
prevresult = allCombDseq(newplist)
partstoadd = []
freezult = dc(result)
#for z in prevresult:
for b in prevresult:
#maybe some of the other assemblies
#we came up with in the recursive step
#are the same as assemblies we will come up
#with in this step. For that reason we may
#want to cull them by not adding them
#to the "parts to add" list
if(isNewDseq(b,freezult)):
partstoadd+=[b]
#try to join the given part to everything else
if((not bluntRight(part)) and (not bluntLeft(b)) and part.linear and b.linear):
#this means we don't allow blunt ligations! We also don't allow
#ligations between a linear and a circular part. Makes sense right?
#since that would never work anyway
newpart = None
try:
#maybe we should try flipping one of these?
newpart= part+b
except TypeError:
#this happens if the parts don't have the right sticky ends.
#we can also try rotating 'part' around
pass
try:
#part b is not blunt on the left so this is OK,
#since blunt and not-blunt won't ligate
newpart = part.rc()+b
except TypeError:
pass
if(newpart == None):
#if the part is still None then it won't ligate forwards
#or backwards. Skip!
continue
try:
if((not bluntRight(newpart)) and (not bluntLeft(newpart))):
#given that the part assembled, can it be circularized?
newpart = newpart.looped()
#this thing will return TypeError if it can't be
#looped
except TypeError:
#this happens if the part can't be circularized
pass
if(isNewDseq(newpart,result)):
#this checks if the sequence we just made
#already exists. this can happen for example if we
#make the same circular assembly but starting from
#a different spot around the circle
result+=[newpart]
result+=partstoadd
return result
def pushDict(Dic,key,value):
"""adds a value to a dictionary, whether it has a key or not"""
try:
pval = Dic[key]
except KeyError:
if(type(value)==list or type(value)==tuple):
value = tuple(value)
pval = ()
elif(type(value)==str):
pval = ""
elif(type(value)==int):
pval = 0
elif(type(value)==float):
pval = 0.0
Dic[key] =pval + value
def findFilesDict(path=".",teststr = "promoter"):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
expts = {}
#print(dirlist)
#for folder in dirlist[1:]:
folder = [path]
#print(dirlist)
for fle in dirlist[0][2]:
if(fle[-3:]=='csv'):
try:
#print('{}\\{}'.format(folder[0],fle))
fline = open(os.path.join(folder[0],fle),'r').readline().split(',')
if(teststr in fline):
expts[fle[:-4]]=os.path.join(folder[0],fle)
except IOError:
pass
if(fle[-4:]=='xlsx'):
try:
xl_file = pd.read_excel(os.path.join(folder[0],fle))
#dfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(dfs.keys()
#print(xl_file.columns)
if(teststr in xl_file.columns):
#print("found")
expts[fle[:-5]]=os.path.join(folder[0],fle)
except (IOError,KeyError) as e:
pass
return expts
def findPartsListsDict(path,teststr = "parts_1"):
"""gets a list of files/folders present in a path"""
walkr = os.walk(path)
dirlist = [a for a in walkr]
#print(dirlist[0][2])
expts = {}
for fle in dirlist[0][2]:
#print fle
if((fle[-4:]=='xlsx') or (fle[-4:]=='xlsm')):
try:
dfs = pd.read_excel(os.path.join(path,fle),None)
#dfs = {sheet_name: xl_file.parse(sheet_name)
# for sheet_name in xl_file.sheet_names}
#print(dfs)
#print(dfs.keys())
if(teststr in list(dfs.keys())[0]):
expts[fle[:-5]] = os.path.join(path,fle)
except IOError:
pass
return expts
def findDNAPaths(startNode,nodeDict,edgeDict):
"""given a start, a dictionary of nodes, and a dictionary of edges,
find all complete paths for a DNA molecule
Complete is defined as: producing a molecule with all blunt edges,
or producing a circular molecule."""
#we assemble the DNA sequences from left to right.
nnode = dc(nodeDict)
noderight = nnode[startNode][1] #the right-hand overhang of the node in question.
del nnode[startNode]
destinations = edgeDict[noderight] #this could contain only one entry, the starting node
seqs = [] #haven't found any complete paths yet
nopaths = True
candidateSeqs = []
if(noderight != "blunt"): #blunt cannot go on
for destination in destinations:
#go through the list of destinations and see if we can go forward
if(destination[1]==0): #this node links to something else
if(destination[0] in nnode): #we havent visited it yet
nopaths = False
newpaths = findDNAPaths(destination[0],nnode,edgeDict) #find all paths from there!
for path in newpaths:
candidateSeqs+=[[startNode]+path]
if(nopaths): #if we dont find any paths, call it good
candidateSeqs+=[[startNode]]
#print("canseqs is {}".format(candidateSeqs))
return candidateSeqs
def getOverhang(Dnaseq,side="left"):
    """extracts the overhang in the DNA sequence, either on the left or right sides.
    If the dna sequence is blunt, then the returned overhang is called 'blunt'
    (minimal sketch following the same overhang convention as appendPart below)"""
    otype,oseq = Dnaseq.five_prime_end() if side=="left" else Dnaseq.three_prime_end()
    if(otype=="blunt"):
        return "blunt"
    flip = "3'" if side=="left" else "5'"
    return str(Dseq(oseq).rc()).lower() if otype==flip else str(oseq).lower()
def appendPart(part,pind,edgeDict,nodeDict):
"""this function appends a part to a dictionary of
edges (overhangs), and nodes(middle sequence) for running DPallcombDseq.
part is a DseqRecord of a DNA part that's been cut by an enzyme.
pind is the index of that part in the parts list
edgedict is a dictionary of edges that says which nodes they are connected
to.
nodedict is a dictionary of nodes that says which edges they have."""
Lend = ""
Rend = ""
Ltype,Lseq = part.five_prime_end()
Rtype,Rseq = part.three_prime_end()
if(Ltype == "blunt"):
Lend = "blunt"
#if the end is blunt append nothing
edgeDict[Lend].append([pind,0])
#pushDict(edgeDict,Lend,((pind,0),))
else:
if(Ltype == "3'"):
#if we have a 3' overhang, then add that sequence
Lend = str(Dseq(Lseq).rc()).lower()
else:
#otherwise, it must be a 5' overhang since we handled the
#blunt condition above.
Lend = str(Lseq).lower()
edgeDict[Lend].append([pind,0])
if(Rtype == "blunt"):
#same thing for the right side
Rend = "blunt"
edgeDict[Rend].append([pind,1])
else:
if(Rtype == "5'"):
Rend = str(Dseq(Rseq).rc()).lower()
else:
Rend = str(Rseq).lower()
edgeDict[Rend].append([pind,1])
nodeDict[pind] = (Lend,Rend)
def annotateScar(part, end='3prime'):
plen = len(part)
if(end=='3prime'):
ovhg = part.seq.three_prime_end()
loc1 = plen-len(ovhg[1])
loc2 = plen
else:
ovhg = part.seq.five_prime_end()
loc1 = 0
loc2 = len(ovhg[1])
oseq = str(ovhg[1]).upper()
scarname = "?"
floc = int(loc1)
sloc = int(loc2)
dir = 1
#scardir = "fwd"
if((oseq in ENDDICT.keys()) or (oseq in rcENDDICT.keys())):
#either direction for now...
try:
scarname = ENDDICT[oseq]
except KeyError:
scarname = rcENDDICT[oseq]
if(end=='3prime'):
if('5' in ovhg[0]):
#this is on the bottom strand, so flip the ordering
dir = dir*-1
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so do nothing
pass
elif(end=='5prime'):
if('5' in ovhg[0]):
#this is on the top strand, so do nothing
pass
elif('3' in ovhg[0]):
#now we have a 3' overhang in the top strand, so flip the ordering
dir = dir*-1
if(oseq in rcENDDICT.keys()):
#so if we found the reverse complement in fact, then reverse everything
#again
dir = dir*-1
if(dir==-1):
floc = int(loc2)
sloc = int(loc1)
#oseq = str(Dseq(oseq).rc())
part.add_feature(floc,sloc,label=scarname,type="Scar")
def DPallCombDseq(partslist):
'''Finds all paths through the partsist using a graph type of approach.
First a graph is constructed from all possible overhang interactions,
then the program makes paths from every part to a logical conclusion
in the graph, then it backtracks and actually assembles the DNA.'''
#actually, we need to produce a graph which describes the parts FIRST
#then, starting from any part, traverse the graph in every possible path and store
#the paths which are "valid" i.e., produce blunt ended or circular products.
edgeDict = defaultdict(lambda : []) #dictionary of all edges in the partslist!
nodeDict = {}#defaultdict(lambda : [])
partDict = {}#defaultdict(lambda : [])
pind = 0
import time
rcpartslist = []
number_of_parts = len(partslist)
for part in partslist:
#this next part appends the part to the list of nodes and edges
appendPart(part,pind,edgeDict,nodeDict)
appendPart(part.rc(),pind+number_of_parts,edgeDict,nodeDict)
rcpartslist+=[part.rc()]
pind+=1
partslist+=rcpartslist
paths = []
for pind in list(nodeDict.keys()):
#find good paths through the graph starting from every part
paths += findDNAPaths(pind,nodeDict,edgeDict)
goodpaths = []
part1time = 0
part2time = 0
for path in paths:
#here we are looking at the first and last parts
#to see if they are blunt
fpart = path[0]
rpart = path[-1]
npart = False
accpart = Dseqrecord(partslist[fpart])
if(nodeDict[fpart][0]=="blunt" and nodeDict[rpart][1]=="blunt"):
#this means we have a blunt ended path! good
npart = True
plen = len(accpart)
#accpart.add_feature(0,3,label="?",type="scar")
#accpart.add_feature(plen-4,plen,label="?",type="scar")
for pind in path[1:]:
#this traces back the path
#we want to add features as we go representing the cloning
#scars. These scars could be gibson or golden gate in nature
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
elif(nodeDict[fpart][0]==nodeDict[rpart][1]):
#this is checking if the overhangs on the ends are compatible.
#if true, then create a circular piece of DNA!
npart = True
#this means we have a circular part! also good!
#accpart = partslist[fpart]
for pind in path[1:]:
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart+=partslist[pind]
#SCARANNOT
'''
ovhg = accpart.seq.three_prime_end()
oseq = ovhg[1]
plen = len(accpart)
if("5" in ovhg[0]):
#ideally we take note of what type of overhang it is
#but for now i'll just take the top strand sequence
oseq = str(Dseq(oseq).rc())
accpart.add_feature(plen-len(oseq),plen,label="?",type="scar")
#/scarannot'''
annotateScar(accpart)
accpart=accpart.looped()
if(npart):
#this checks if the part we think is good already exists
#in the list
if(isNewDseq(accpart,goodpaths)):
goodpaths+=[accpart]
#part2time+=time.time()-stime
#dtime = time.time()-stime
#stime = time.time()
#print("done tracing back paths, took "+str(dtime))
#print("first half took " + str(part1time))
#print("second half took " + str(part2time))
return goodpaths
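# Illustrative sketch: this mirrors how makeEchoFile (below) uses DPallCombDseq:
# digest each part, keep only fragments without internal cut sites, then enumerate
# every blunt-ended or circular product.
#     products = DPallCombDseq(fragments_without_cut_sites)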
def chewback(seqtochew,chewamt,end="fiveprime"):
"""chews back the amount mentioned, from the end mentioned."""
wat = seqtochew.watson
cri = seqtochew.crick
if(len(seqtochew) > chewamt*2+1):
if(end=="fiveprime"):
cwat = wat[chewamt:]
ccri = cri[chewamt:]
else:
cwat = wat[:-chewamt]
ccri = cri[:-chewamt]
newseq = Dseq(cwat,ccri,ovhg = chewamt)
return newseq
else:
return None
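# Illustrative sketch: for Gibson assembly makeEchoFile chews `chewnt` nucleotides
# back from each fragment before products are enumerated, e.g.
#     gibson_ready = [chewback(frag, chewnt) for frag in frags if len(frag) > chewnt * 2 + 1]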
def makeEchoFile(parts,aslist,gga=ggaPD,partsFm=partsFm,source=source,\
output = "output.csv",selenzyme=selenzyme,fname="recentassembly",\
protocolsDF=None,sepfiles=True,sepfilename="outputLDV.csv",\
printstuff=True,progbar=None,mypath=".",annotateDF=None):
"""makes an echo csv using the given list of assemblies and source plate of
parts..
inputs:
parts: dataframe of what's in the source plate
aslist: dataframe of what we need to assemble
gga: a short dictionary indicating what volume of all the components
go into the reaction mix
partsFm: how many femtomoles of each part to use
source: the name of the source plate. like "384PP_AQ_BP or something
output: the name of the output file
selenzyme: the enzyme we are going to use for assembly. everything
is assembled with the same enzyme! actually this does nothing because
the enzyme is taken from the aslist thing anyway
fname: this is the name of the folder to save the successfully assembled
dna files into
protocolsDF: a dataframe containing a descriptor for different possible
protocols. For instance it would say how much DNA volume and
concentration we need for GGA or gibson."""
#this is the boilerplate columns list
dnaPath = os.path.join(mypath,"DNA")
outfile = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f1init = len(outfile)
outfile2 = "Source Plate Name,Source Plate Barcode,Source Plate Type,Source Well,\
Sample ID,Sample Name,Sample Group,Sample Comment,Destination Plate Name,\
Destination Well,Transfer Volume\n"
f2init = len(outfile2)
#this iterates through rows in the assembly list file. Each row
#defines an assembly, with the columns representing what parts go in.
#this may not be ideal but it's fairly human readable and we only do
#four parts + vector for each assembly.
_,fname = os.path.split(fname)
if("." in fname):
fname = fname[:fname.index(".")]
#the following is for making a spreadsheet style sequence list for
#performing further assemblies
prodSeqSpread = "well,part,description,type,left,right,conc (nM),date,numvalue,sequence,circular,5pend,3pend,length\n"
prevplate = None
prevtype = None
maxprog = float(len(aslist))
for assnum in range(len(aslist)):
#this goes row by row
if(progbar != None):
progbar.value=float(assnum+1)/maxprog
assembly = aslist[assnum:assnum+1] #cuts out one row of dataframe
dwell = assembly.targwell[assembly.targwell.index[0]] #well where assembly will happen
#print("pick enzyme")
#print(assembly)
enzyme=None
#if we are doing Gibson assembly, then the restriction enzyme is undefined
try:
selenzyme = assembly.enzyme[assembly.enzyme.index[0]]
#if the user forgot to define an enzyme assume it is BsaI. That's the most common one we use
except KeyError:
selenzyme = "BsaI"
        if(protocolsDF is not None):
cprt_temp = "gga"
if(selenzyme == "gibson"):
cprt_temp = "gibson"
#iloc[0] is used in case there are multiple parts with the same
#name. Only the first one is used in that case.
curprot = {"dnasln": protocolsDF[(protocolsDF.protocol==cprt_temp)&\
(protocolsDF.component == "dnasln")].amount.iloc[0]}
partsFm = curprot[curprot.component==partfm].amount.iloc[0]
vectorFm = curprot[curprot.component==vectorfm].amount.iloc[0]
else:
curprot = ggaPD
partsFm = ggaFm
vectorFm = ggavecGm
if(selenzyme == "gibson"):
#for gibson assembly the protocol is different
curprot = gibassyPD
partsFm = gibFm
vectorFm = gibvecFm
water = float(curprot[curprot.component=="dnasln"].volume)*1000 #total amount of water, to start with
if(printstuff):
print("assembling with "+selenzyme)
aind = assembly.index[0] #necessary for dataframes probably because I'm dumb
frags = []
if(not selenzyme == "gibson"):
enzyme = enzymes[selenzyme]
esite = enzyme.site.lower()
esiterc = str(Dseq(enzyme.site).rc()).lower()
for col in assembly:
if(col=="targwell"):#since every row is terminated by the "target well",
#we'll take this opportunity to put in the water
if(int(water) <25):
#echo gets mad if you tell it to pipet significantly less than 25 nl
water = 25
ewat = int(water) #the echo automatically rounds to the nearest 25,
#so it's not really necessary to round here.
#dsrfrags = [Dseqrecord(a) for a in frags]
#x = pydAssembly(dsrfrags,limit = 4)
#print(frags)
#print(len(frags))
allprod= []
nefrags = []
cutfrags = []
if(selenzyme != "gibson"):
enzyme = enzymes[selenzyme]
for frag in frags:
if(selenzyme == "gibson"):
if(len(frag)>chewnt*2+1):
nefrags += [chewback(frag,chewnt)]
else:
raise ValueError("part with sequence "+frag+" is too "+\
"short for gibson! (<= 80 nt)")
else:
newpcs = frag.cut(enzyme)
if(len(newpcs) == 0):
newpcs+=[frag]
for pcs in newpcs:
if(pcs.find(esite)+pcs.find(esiterc)==-2):
nefrags+=[pcs]
allprod = DPallCombDseq(nefrags)
if(printstuff):
print("found {} possible products".format(len(allprod)))
goodprod = []
newpath = os.path.join(dnaPath,fname)
if(printstuff):
print("saving in folder {}".format(newpath))
Cname = ""
try:
#this part gathers the "name" column to create the output sequence
Cname = assembly.name[assembly.name.index[0]]
except KeyError:
Cname = ""
if(Cname == "" or str(Cname) == "nan"):
Cname = "well"+dwell
if(printstuff):
print("Parts in construct {}".format(Cname))
if not os.path.exists(newpath):
if(printstuff):
print("made dirs!")
os.makedirs(newpath)
num = 0
for prod in allprod:
Cnamenum = Cname
#filename = Cname+".gbk"
if(len(allprod) > 1):
#filename = Cname+"_"+str(num)+".gbk"
#wout = open(os.path.join(newpath,filename),"w")
Cnamenum = Cname+"_"+str(num)
else:
pass
#wout = open(os.path.join(newpath,filename),"w")
if((bluntLeft(prod) and bluntRight(prod)) or (prod.circular)):
num+=1
goodprod+=[prod]
#topo = ["linear","circular"][int(prod.circular)]
booltopo = ["FALSE","TRUE"][int(prod.circular)]
#wout.write("\r\n>Construct"+str(num)+"_"+topo)
un_prod = "_".join(Cnamenum.split())
#wout.write("LOCUS {} {} bp ds-DNA {} SYN 01-JAN-0001\n".format(un_prod,len(prod),topo))
#wout.write("ORIGIN\n")
#wout.write(str(prod)+"\n//")
now = datetime.datetime.now()
nowdate = "{}/{}/{}".format(now.month,now.day,now.year)
prod.name = Cnamenum
plt.figure(figsize=(8,1))
ax = plt.gca()
drawConstruct(ax,prod,annotateDF=annotateDF)
plt.show()
prod.write(os.path.join(newpath,Cnamenum+".gbk"))
prodSeqSpread += "{},{},assembled with {},,,,30,{},,{},{},{},{},{}\n".format(\
dwell,un_prod, selenzyme,nowdate,prod.seq,booltopo,0,0,len(prod))
#wout.close()
assembend = ["y","ies"][int(len(goodprod)>1)]
if(printstuff):
print("Detected {} possible assembl{}".format(len(goodprod),assembend))
frags = []
if(water <=0):
print("WARNING!!!! water <=0 in well {}".format(dwell))
else:
#print("water from {} to {}, {} nl".format(waterwell,dwell,ewat))
if(prevplate == None):
#print("normalwater")
#im not convinced this ever gets triggered
#but just in case, i guess we can find the first water well
waterrows=parts[parts.part=="water"]
if(len(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
platetype= waterrow.platetype
curplatebc = waterrow.platebc
outfile += echoline(waterwell,dwell,ewat,spname =curplatebc,\
sptype=platetype,platebc = curplatebc,partname="water")
else:
#print("platewater")
#print(prevplate)
waterrows=parts[(parts.part=="water") & (parts.platebc==prevplate)]
if(len(waterrows)==0):
raise KeyError("no water wells indicated!")
#print(waterrows)
waterrow = waterrows.iloc[0]
waterwell = waterrow.well
watline = echoline(waterwell,dwell,ewat,spname =prevplate,\
sptype=prevtype,platebc = prevplate,partname="water")
if("LDV" in prevtype):
outfile2+=watline
else:
outfile += watline
#add water to the well!
if(printstuff):
print("")
elif(col in ["comment","enzyme","name"]):#skip this column!
pass
else:
#this is the part name from the "assembly" file
part = assembly[col][aind]
if(str(part) == 'nan'):
#this means we skip this part, because the name is empty
if(printstuff):
print("skip one!")
else:
#shouldnt need to define "part" again??
#part = assembly[col][aind]
#this is the name of the part!
#parts[parts.part==assembly[col][aind]].well.iloc[0]
evol = 0
if(':' in str(part)):
#this means we have multiple parts to mix!
subparts = part.split(':')
t_partsFm = partsFm/len(subparts)
t_vecFm = vectorFm/len(subparts)
for subpart in subparts:
useFm = t_partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = t_vecFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
subpart,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
else:
useFm = partsFm
if(col == "vector"):
#use the vector at lower concentration!!
useFm = vectorFm
e1,e2,pDseq,prevplate,prevtype = echoSinglePart(parts,\
part,useFm,dwell,printstuff=printstuff,enzyme=enzyme)
frags+=[pDseq]
evol += e2
if(sepfiles):
if("LDV" in e1):
outfile2+=e1
else:
outfile+= e1
else:
outfile+= e1
water=water-evol
pspread = open(os.path.join(newpath,fname+".csv"),"w")
pspread.write(prodSeqSpread)
pspread.close()
seqdispDF = pd.read_csv(os.path.join(newpath,fname+".csv"),usecols=["well","part","circular","length"])
display(seqdispDF)
display(FileLink(os.path.join(newpath,fname+".csv")))
if(len(outfile)>f1init):
ofle = open(output,"w")
ofle.write(outfile)
ofle.close()
display(FileLink(output))
if(sepfiles and (len(outfile2) > f2init)):
if(printstuff):
print("wrote LDV steps in {}".format(sepfilename))
ofle2 = open(sepfilename,"w")
ofle2.write(outfile2)
ofle2.close()
display(FileLink(sepfilename))
outitems = []
class assemblyFileMaker():
def __init__(self,mypath=".",partsdf = None):
self.p = partsdf
self.holdup=False
self.ddlay = widgets.Layout(width='75px',height='30px')
self.eblay = widgets.Layout(width='50px',height='30px')
self.lsblay = widgets.Layout(width='140px',height='30px')
self.sblay = widgets.Layout(width='100px',height='30px')
self.rsblay = widgets.Layout(width='60px',height='30px')
self.Vboxlay = widgets.Layout(width='130px',height='67px')
self.textlay = widgets.Layout(width='200px',height='30px')
self.PlateLetters="ABCDEFGHIJKLMNOP"
self.PlateNumbers=(1,2,3,4,5,6,7,8,9,10,11,12,\
13,14,15,16,17,18,19,20,21,22,23,24)
self.PlateRowsCols=(16,24)
self.mypath = mypath
if(type(self.p)==pd.DataFrame):
self.parts={"google doc":"google doc"}
else:
self.parts = findPartsListsDict(os.path.join(self.mypath,"partslist"))
#txtdisabl = False
assemblies = []
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
#parts = findPartsListsDict(os.path.join(mypath,"partslist"))
self.loadFIleList = widgets.Dropdown(
options=oplist,
#value=2,
layout=self.lsblay,
description='',
)
self.loadbut = widgets.Button(
description='Load',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.rsblay,
tooltip='Click to load an existing file',
)
self.listEverything = widgets.Checkbox(
value=False,
description='List all parts',
disabled=False
)
self.fname1 = widgets.Text(
value="untitled",
placeholder = "type something",
description='Assembly File Name:',
layout=self.textlay,
disabled=False
)
self.DestWell = widgets.Text(
value="A1",
placeholder = "type something",
description='Dest Well:',
layout=self.Vboxlay,
disabled=True
)
self.AddCols = widgets.IntText(
value=0,
placeholder = "type something",
description='Extra Cols:',
layout=self.Vboxlay,
#disabled=True
)
self.drop2 = widgets.Dropdown(
options=self.parts,
width=100,
#value=2,
description='parts list:',
layout=self.textlay,
)
#print(self.drop2.style.keys)
self.but = widgets.Button(
description='New...',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
layout=self.sblay,
tooltip='Click to start adding assemblies',
#icon='check'
)
self.finbut = widgets.Button(
description='Save!',
disabled=True,
button_style='warning',#, 'danger' or ''
layout=self.sblay,
tooltip='Finish and Save',
#icon='check'
)
self.but.on_click(self.on_button_clicked)
self.finbut.on_click(self.finishAndSave)
self.loadbut.on_click(self.loadFile_clicked)
self.listEverything.observe(self.on_listEverything_changed,names='value')
self.cbox = widgets.HBox([
widgets.VBox([self.fname1,widgets.HBox([self.loadFIleList,self.loadbut]),self.listEverything]),\
widgets.VBox([self.drop2,widgets.HBox([self.DestWell,self.AddCols])]),\
widgets.VBox([self.but,self.finbut],layout=self.Vboxlay)])
display(self.cbox)
def add_row(self,b):
thisrow = int(b.tooltip[4:])
self.addWidgetRow(labonly=False,copyrow=thisrow)
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#b.disabled=True
#print(b)
def remove_row(self,b):
thisrow = int(b.tooltip[4:])
#outcolnum=0
cleared = False
for colnum in list(range(len(self.outitems))[:-3])\
+[len(self.outitems)-2]:
pvalue = self.outitems[colnum][thisrow].value
if(pvalue != ""):
cleared = True
self.outitems[colnum][thisrow].value = ""
if(cleared):
return
for colnum in range(len(self.outitems)):
self.outitems[colnum]=self.outitems[colnum][:thisrow]+\
self.outitems[colnum][thisrow+1:]
#outcolnum +=1
newbutcol = []
newrow = 0
for a in self.outitems[-1]:
#print(a)
try:
a.children[0].tooltip = "row "+str(newrow)
a.children[1].tooltip = "row "+str(newrow)
if(len(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
newrow +=1
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet.children=outcols
#print(b)
def generateOptionsList(self,df,colname,prevval=None,listmode=0):
"""come up with a list of options given a column name. This contains
a ton of specific code"""
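# Hedged summary of the branches below (part names are hypothetical):
#   listmode=1 (and not the enzyme column) -> every part name, plus ""
#   colname containing "vector"            -> parts typed "vector" or "UNS", plus ""
#   colname == "enzyme"                    -> the module-level `enlist` of enzymes
#   any other colname, e.g. "promoter"     -> sorted parts of that type, plus ""
# If prevval is not among the options it is appended, so the dropdown can
# keep displaying its current value.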
oplist = []
if(listmode == 1 and colname != "enzyme"):
oplist = sorted(list(df.part))+[""]
else:
if("vector" in colname):
oplist = sorted(list(df[(df.type=="UNS")|\
(df.type=="vector")].part))+[""]
elif(colname=="enzyme"):
oplist =enlist
if(prevval == ""):
prevval = enlist[0]
else:
oplist = sorted(list(df[df.type==colname].part))+[""]
if(not (prevval in oplist)):
oplist+=[prevval]
return oplist,prevval
def on_listEverything_changed(self,change):
"""this triggers when you change the value of "listEverything".
Here we want to change the values in the drop down to correspond to
either
(a) surrounding parts or
(b) the appropriate category
"""
self.updatePartOptions(None)
"""
typewewant = type(widgets.Dropdown())
#this means we checked the box. Now change drop box's options
for col in self.outitems:
for item in col:
if(type(item)==typewewant):
oplist,pval = self.generateOptionsList(self.p,\
col[0].value,item.value,change['new'])
item.options=oplist
item.value=pval
#"""
def loadFile_clicked(self,b):
"""loads a file from memory, instead of making a brand new one!"""
self.on_button_clicked(b,loadFile=self.loadFIleList.value)
def on_button_clicked(self,b,loadFile=None):
"""start making the assembly! THis part loads the first row of parts
drop downs and populates them with options!"""
#txtdisabl = True
b.disabled=True
self.but.disabled = True
self.drop2.disabled = True
self.finbut.disabled = False
self.DestWell.disabled = False
self.AddCols.disabled = True
self.loadFIleList.disabled=True
self.loadbut.disabled=True
if(loadFile!=None):
#this should read the file
self.fname1.value=os.path.splitext(os.path.split(loadFile)[1])[0]
ftoload = pd.read_csv(loadFile).fillna('')
try:
ftoload = ftoload.drop('comment',axis=1)
except (ValueError,KeyError) as e:
#if this happens then 'comment' was already not there. great!
pass
self.AddCols.value=len(ftoload.columns)-9
if(not(type(self.p)==pd.DataFrame)):
dfs = pd.read_excel(self.drop2.value,None)
sheetlist = list(dfs.keys())
self.p = pd.DataFrame.append(dfs["parts_1"],dfs["Gibson"])
self.collabels = ["vector1","promoter","UTR","CDS","Terminator","vector2","enzyme","name",""]
if(self.AddCols.value>0):
newclabeld = self.collabels
for x in range(self.AddCols.value):
newclabeld=newclabeld[:-4]+["newcol"+str(x+1)]+newclabeld[-4:]
self.collabels = newclabeld
self.outitems = []
self.addWidgetRow(labonly=True)
if(loadFile==None):
self.addWidgetRow(labonly=False)
else:
#print(loadFile)
findex = ftoload.index
first = True
for findex in ftoload.index:
dfrow = ftoload.iloc[findex]
currow = list(dfrow)
if(first):
self.DestWell.value=dfrow.targwell
#extracols =
#startpos =
first=False
currow = list(dfrow.drop(['targwell','name','enzyme']))\
+[dfrow.enzyme]+[dfrow["name"]]
self.addWidgetRow(labonly=False,copyrow=currow)
#self.updatePartOptions()
#readindex = ftoload.index()
outcols = [widgets.VBox(a) for a in self.outitems ]
self.bigSheet=widgets.HBox(outcols)
display(self.bigSheet)
def updatePartOptions(self,b=None):
"""update the options available to each drop down, according to what
values are chosen in the other drop downs. For example, only allow
parts which are compatible"""
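# "Compatible" here means overhang matching: a candidate part is kept only if
# its left overhang equals the right overhang of the part chosen in the column
# to its left, and its right overhang equals the left overhang of the part to
# its right. UNS junctions are exempted from this filter when the selected
# enzyme is not "gibson" (see the `logiclist &= ...` lines below).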
if(self.holdup):
return
self.holdup=True
maxcols = len(self.outitems)-3
for colnum in range(maxcols):
for itemnum in range(len(self.outitems[colnum]))[1:]:
curitem = self.outitems[colnum][itemnum]
leftitem = 0
rightitem = 0
if(colnum == 0):
leftitem = maxcols-1
else:
leftitem = colnum-1
if(colnum == maxcols-1):
rightitem = 0
else:
rightitem=colnum+1
leftoverhang = ""
rightoverhang = ""
leftvalue = self.outitems[leftitem][itemnum].value
rightvalue = self.outitems[rightitem][itemnum].value
logiclist = np.array([True]*len(self.p))
if(leftvalue!=""):
try:
leftoverhang=self.p[self.p.part == leftvalue].right.iloc[0]
except IndexError:
#this means we didn't find the part!
raise ValueError("part {} has incorrect right overhang!".format(leftvalue))
if((self.outitems[-3][itemnum].value!='gibson') \
and ('UNS' in leftoverhang)):
pass
else:
logiclist &= (self.p.left==leftoverhang)
#print(leftoverhang)
if(rightvalue!=""):
try:
rightoverhang=self.p[self.p.part == rightvalue].left.iloc[0]
except IndexError:
raise ValueError("part {} has incorrect right overhang!".format(rightvalue))
if((self.outitems[-3][itemnum].value!='gibson') \
and ('UNS' in rightoverhang)):
pass
else:
logiclist &= (self.p.right==rightoverhang)
#print(rightoverhang)
#print("this part wants {} and {}".format(leftoverhang,rightoverhang))
self.holdup=True
prevval = curitem.value
oplist,newval = self.generateOptionsList(self.p[logiclist],\
self.outitems[colnum][0].value,\
prevval,self.listEverything.value)
curitem.options = oplist
curitem.value = newval
self.holdup=False
def incrementWellPos(self,position):
"""increments a 384 well plate location such as A1 to the next logical
position, going left to right, top to bottom"""
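# Worked example, assuming the 16 x 24 (384-well) layout in self.PlateRowsCols
# and the row letters A-P in self.PlateLetters:
#   "A1"  -> "A2"   (advance one column)
#   "A24" -> "B1"   (past the last column: wrap to the start of the next row)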
poslet = self.PlateLetters.index(position[0])
posnum = int(position[1:])
newposlet = poslet
newposnum = posnum+1
if(newposnum > self.PlateRowsCols[1]):
newposnum-=self.PlateRowsCols[1]
newposlet+=1
newposition = self.PlateLetters[newposlet]+str(newposnum)
return newposition
def finishAndSave(self,b):
outfiletext = ",".join(self.collabels[:-1]+["targwell"])+"\n"
outfname = self.fname1.value+".csv"
startPos = self.DestWell.value
curpos = startPos
for i in range(len(self.outitems[0]))[1:]:
outlst = []
for nam,col in zip(self.collabels,self.outitems):
if(nam != ""):
outlst+=[col[i].value]
outlst+=[curpos]
curpos = self.incrementWellPos(curpos)
outfiletext+=",".join(outlst)+"\n"
with open(os.path.join(self.mypath,"assemblies",outfname),"w") as outfle:
outfle.write(outfiletext)
assemfpath = os.path.join(self.mypath,"assemblies",outfname)
#print("wrote {}".format())
display(FileLink(assemfpath))
display(pd.read_csv(os.path.join(self.mypath,"assemblies",outfname)))
#b.disabled=True
def addWidgetRow(self,labonly=True,copyrow=None):
outcolnum=0
for col in self.collabels:
if(labonly):
interwidg = widgets.Label(col)
else:
if(col=="name"):
newname = ""
#print(copyrow)
if(type(copyrow)==list):
newname = copyrow[outcolnum]
elif(type(copyrow)==int):
oldname = self.outitems[outcolnum][copyrow].value
newname = incrementString(oldname)
interwidg = widgets.Text(\
layout=self.ddlay,\
value=str(newname))
elif(col==""):
but1 = widgets.Button(\
description='+',
button_style='success',
tooltip='row '+str(len(self.outitems[0])-1),
layout=self.eblay
)
but2 = widgets.Button(\
description='-',
button_style='danger',
tooltip='row '+str(len(self.outitems[0])-1),
layout=self.eblay,
#disabled=disbut
)
but1.on_click(self.add_row)
but2.on_click(self.remove_row)
interwidg =widgets.HBox([but1,but2])
else:
oplist = []
prevval = ""
if(type(copyrow)==int):
prevval = self.outitems[outcolnum][copyrow].value
elif(type(copyrow)==list):
prevval = copyrow[outcolnum]
oplist, prevval = self.generateOptionsList(self.p,col,\
prevval,self.listEverything.value)
#print(oplist)
#print("value is")
#print(prevval)
interwidg = widgets.Dropdown(\
options=oplist,\
value=prevval,\
layout=self.ddlay)
interwidg.observe(self.updatePartOptions,names='value')
try:
self.outitems[outcolnum]+=[interwidg]
except IndexError:
self.outitems+=[[interwidg]]
outcolnum +=1
self.updatePartOptions()
for a in self.outitems[-1]:
try:
if(len(self.outitems[0])<=2):
a.children[1].disabled=True
else:
a.children[1].disabled=False
except AttributeError:
pass
def make_assembly_file(mypath=".",externalDF = None):
"""this function will assist the user with making assembly .csv files!"""
x=assemblyFileMaker(mypath=mypath,partsdf=externalDF)
def process_assembly_file(mypath=".",printstuff=True,partsdf=None,annotateDF=None):
oplist = findFilesDict(os.path.join(mypath,"assemblies"))
if(type(partsdf)==pd.DataFrame):
parts = {"google doc":"google doc"}
else:
parts = findPartsListsDict(os.path.join(mypath,"partslist"))
drop1 = widgets.Dropdown(
options=oplist,
#value=2,
description='Assembly:',
)
drop2 = widgets.Dropdown(
options=parts,
#value=2,
description='parts list:',
)
but = widgets.Button(
description='Select',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
#icon='check'
)
#button = widgets.Button(description="Click Me!")
#display(button)
#print(oplist)
def on_button_clicked(b):
pbar = widgets.FloatProgress(
min=0,
max=1.0
)
display(pbar)
if(drop1.value[-4:]=="xlsx" or drop1.value[-3:]=="xls"):
x=pd.read_excel(drop1.value)
else:
x=pd.read_csv(drop1.value)
if(type(partsdf)==pd.DataFrame):
p = partsdf
else:
dfs = pd.read_excel(drop2.value,None)
#print(drop1.value)
sheetlist = list(dfs.keys())
p = | pd.DataFrame.append(dfs["parts_1"],dfs["Gibson"]) | pandas.DataFrame.append |
from pandas import Series
class Evaluator:
series: Series
def __init__(self, series: Series):
self.series = series
self.unique_series = [value for value in self.series.dropna().unique()]
def series_match(self, pattern: str):
return Series(self.unique_series).astype(str).str.match(pattern).eq(True).all()
def series_contains(self, pattern: str):
return | Series(self.unique_series) | pandas.Series |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_url_tls() -> str:
conn = os.environ["POSTGRES_URL_TLS"]
return conn
@pytest.fixture(scope="module") # type: ignore
def postgres_rootcert() -> str:
cert = os.environ["POSTGRES_ROOTCERT"]
return cert
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df = df.sort_values("test_int").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_without_partition_range(postgres_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="Int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_selection(postgres_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_projection(postgres_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_join(postgres_url: str) -> None:
query = "SELECT T.test_int, T.test_bool, S.test_language FROM test_table T INNER JOIN test_str S ON T.test_int = S.id"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(5),
data={
"test_int": pd.Series([0, 1, 2, 3, 4], dtype="Int64"),
"test_bool": pd.Series([None, True, False, False, None], dtype="boolean"),
"test_language": pd.Series(
["English", "中文", "日本語", "русский", "Emoji"], dtype="object"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_spja(postgres_url: str) -> None:
query = "select test_bool, AVG(test_float) as avg, SUM(test_int) as sum from test_table as a, test_str as b where a.test_int = b.id AND test_nullint is not NULL GROUP BY test_bool ORDER BY sum"
df = read_sql(postgres_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_on_utf8(postgres_url: str) -> None:
query = "SELECT * FROM test_str"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(9),
data={
"id": pd.Series([0, 1, 2, 3, 4, 5, 6, 7, 8], dtype="Int64"),
"test_language": pd.Series(
[
"English",
"中文",
"日本語",
"русский",
"Emoji",
"Latin1",
"Extra",
"Mixed",
"",
],
dtype="object",
),
"test_hello": pd.Series(
[
"Hello",
"你好",
"こんにちは",
"Здра́вствуйте",
"😁😂😜",
"¥§¤®ð",
"y̆",
"Ha好ち😁ðy̆",
None,
],
dtype="object",
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_index_col(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query, index_col="test_int")
expected = pd.DataFrame(
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
expected.set_index("test_int", inplace=True)
assert_frame_equal(df, expected, check_names=True)
def test_types_binary(postgres_url: str) -> None:
query = "SELECT test_date, test_timestamp, test_timestamptz, test_int16, test_int64, test_float32, test_numeric, test_bpchar, test_char, test_varchar, test_uuid, test_time, test_json, test_jsonb, test_bytea, test_enum, test_f4array, test_f8array, test_narray, test_i2array, test_i4array, test_i8array FROM test_types"
df = read_sql(postgres_url, query, partition_on="test_int16", partition_num=3)
expected = pd.DataFrame(
index=range(4),
data={
"test_date": pd.Series(
["1970-01-01", "2000-02-28", "2038-01-18", None], dtype="datetime64[ns]"
),
"test_timestamp": pd.Series(
[
"1970-01-01 00:00:01",
"2000-02-28 12:00:10",
"2038-01-18 23:59:59",
None,
],
dtype="datetime64[ns]",
),
"test_timestamptz": pd.Series(
[
"1970-01-01 00:00:01",
"2000-02-28 16:00:10",
"2038-01-18 15:59:59",
None,
],
dtype="datetime64[ns]",
),
"test_int16": pd.Series([0, 1, 2, 3], dtype="Int64"),
"test_int64": pd.Series(
[-9223372036854775808, 0, 9223372036854775807, None], dtype="Int64"
),
"test_float32": pd.Series(
[None, 3.1415926535, 2.71, -1e-37], dtype="float64"
),
"test_numeric": pd.Series([None, 521.34, 999.99, 0.00], dtype="float64"),
"test_bpchar": pd.Series(["a ", "bb ", "ccc ", None], dtype="object"),
"test_char": pd.Series(["a", "b", None, "d"], dtype="object"),
"test_varchar": pd.Series([None, "bb", "c", "defghijklm"], dtype="object"),
"test_uuid": pd.Series(
[
"86b494cc-96b2-11eb-9298-3e22fbb9fe9d",
"86b49b84-96b2-11eb-9298-3e22fbb9fe9d",
"86b49c42-96b2-11eb-9298-3e22fbb9fe9d",
None,
],
dtype="object",
),
"test_time": pd.Series(
["08:12:40", None, "23:00:10", "18:30:00"], dtype="object"
),
"test_json": pd.Series(
[
'{"customer":"<NAME>","items":{"product":"Beer","qty":6}}',
'{"customer":"<NAME>","items":{"product":"Diaper","qty":24}}',
'{"customer":"<NAME>","items":{"product":"Toy Car","qty":1}}',
None,
],
dtype="object",
),
"test_jsonb": pd.Series(
[
'{"qty":6,"product":"Beer"}',
'{"qty":24,"product":"Diaper"}',
'{"qty":1,"product":"Toy Car"}',
None,
],
dtype="object",
),
"test_bytea": pd.Series(
[
None,
b"\xd0\x97\xd0\xb4\xd1\x80\xd0\xb0\xcc\x81\xd0\xb2\xd1\x81\xd1\x82\xd0\xb2\xd1\x83\xd0\xb9\xd1\x82\xd0\xb5",
b"",
b"\xf0\x9f\x98\x9c",
],
dtype="object",
),
"test_enum": pd.Series(
["happy", "very happy", "ecstatic", None], dtype="object"
),
"test_f4array": pd.Series(
[[], None, [123.123], [-1e-37, 1e37]], dtype="object"
),
"test_f8array": pd.Series(
[[], None, [-1e-307, 1e308], [0.000234, -12.987654321]], dtype="object"
),
"test_narray": pd.Series(
[[], None, [521.34], [0.12, 333.33, 22.22]], dtype="object"
),
"test_i2array": pd.Series(
[[-1, 0, 1], [], [-32768, 32767], None], dtype="object"
),
"test_i4array": pd.Series(
[[-1, 0, 1123], [], [-2147483648, 2147483647], None], dtype="object"
),
"test_i8array": pd.Series(
[[-9223372036854775808, 9223372036854775807], [], [0], None],
dtype="object",
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_types_csv(postgres_url: str) -> None:
query = "SELECT test_date, test_timestamp, test_timestamptz, test_int16, test_int64, test_float32, test_numeric, test_bpchar, test_char, test_varchar, test_uuid, test_time, test_json, test_jsonb, test_bytea, test_enum::text, test_f4array, test_f8array, test_narray, test_i2array, test_i4array, test_i8array FROM test_types"
df = read_sql(
postgres_url, query, protocol="csv", partition_on="test_int16", partition_num=2
)
expected = pd.DataFrame(
index=range(4),
data={
"test_date": pd.Series(
["1970-01-01", "2000-02-28", "2038-01-18", None], dtype="datetime64[ns]"
),
"test_timestamp": pd.Series(
[
"1970-01-01 00:00:01",
"2000-02-28 12:00:10",
"2038-01-18 23:59:59",
None,
],
dtype="datetime64[ns]",
),
"test_timestamptz": pd.Series(
[
"1970-01-01 00:00:01",
"2000-02-28 16:00:10",
"2038-01-18 15:59:59",
None,
],
dtype="datetime64[ns]",
),
"test_int16": | pd.Series([0, 1, 2, 3], dtype="Int64") | pandas.Series |
from collections import defaultdict
import os
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
import torch
from .transforms.util import get_transforms
from .engine.voc_assayer import VOCAssayer
from .datasets import VOCDetection
from .utils.general import make_save_path
def assay(csv_file,
net_name,
number_nets_to_train,
epochs_list,
batch_size,
restore_path,
test_results_save_path,
configfile,
random_seed,
root=None,
num_classes=2,
pad_size=500,
embedding_n_out=512,
loss_func='CE',
method='transfer',
mode='classify',
num_workers=4,
data_parallel=False):
"""assay behavior of models trained with Pascal VOC Detection set
Parameters
----------
csv_file : str
name of .csv file containing prepared data sets.
Generated by searchnets.data.split function.
net_name : str
name of convolutional neural net architecture to train.
One of {'alexnet', 'VGG16', 'CORnet_Z', 'CORnet_S'}
number_nets_to_train : int
number of training "replicates"
epochs_list : list
of training epochs. Replicates will be trained for each
value in this list. Can also just be one value, but a list
is useful if you want to test whether effects depend on
number of training epochs.
batch_size : int
number of samples in a batch of training data
restore_path : str
path to directory where checkpoints and train models were saved
test_results_save_path : str
path to directory where results from measuring accuracy on test set should be saved
configfile : str
filename of config.ini file. Used (without .ini extension) as name for output file
that is saved in test_results_save_path.
random_seed : int
to seed random number generator
root : str
path to dataset root. Used with VOCDetection dataset to specify where VOC data was downloaded to.
num_classes : int
number of classes. Default is 2 (target present, target absent).
pad_size : int
size to which images in PascalVOC / Visual Search Difficulty dataset should be padded.
Images are padded by making an array of zeros and randomly placing the image within it
so that the entire image is still within the boundaries of (pad size x pad size).
Default value is specified by searchnets.transforms.functional.VSD_PAD_SIZE.
Argument has no effect if the dataset_type is not 'VOC'.
Used to determine transforms to use at test time.
loss_func : str
type of loss function to use. One of {'CE', 'BCE'}. Default is 'CE',
the standard cross-entropy loss. Used to determine transforms to use at test time.
num_workers : int
number of workers used by torch.DataLoaders. Default is 4.
data_parallel : bool
if True, use torch.nn.dataparallel to train network on multiple GPUs. Default is False.
method : str
training method. One of {'initialize', 'transfer'}.
'initialize' means randomly initialize all weights and train the
networks "from scratch".
'transfer' means perform transfer learning, using weights pre-trained
on imagenet.
Default is 'transfer'.
mode : str
training mode. One of {'classify', 'detect'}.
'classify' is standard image classification.
'detect' trains to detect whether specified target is present or absent.
Default is 'classify'.
embedding_n_out : int
for DetectNet, number of output features from input embedding.
I.e., the output size of the linear layer that accepts the
one hot vector querying whether a specific class is present as input.
Default is 512.
Returns
-------
None
saves .npz output file with following keys:
arrays_per_model_dict : dict
where keys are paths to model and values are array
of predictions made by that model for test set
"""
if mode == 'detect' and loss_func != 'BCE':
print(
f"when mode is 'detect', loss_func must be 'BCE', but was {loss_func}. Setting to 'BCE."
)
loss_func = 'BCE'
if random_seed:
np.random.seed(random_seed) # for shuffling in batch_generator
torch.manual_seed(random_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
for epochs in epochs_list:
print(f'assaying behavior on test set for {net_name} model trained for {epochs} epochs')
# ------ initialize variables to hold outputs from all training replicates ------------------------------------
# ---- for VSD save results.gz **and** a .csv, because we have multiple metrics,
# and because csv files are better anyway
assay_records = defaultdict(list) # records gets turned into pandas DataFrame, then saved as .csv
# these will be lists of DataFrames, one for each training replicate
df_lists = defaultdict(list)
arrays_per_model = {} # inputs/outputs of model, where key is restore path, and value is dict of arrays
for net_number in range(1, number_nets_to_train + 1):
transform, target_transform = get_transforms('VSD', loss_func, pad_size)
testset = VOCDetection(root=root,
csv_file=csv_file,
image_set='trainval',
split='test',
download=True,
transform=transform,
target_transform=target_transform)
restore_path_this_net = make_save_path(restore_path, net_name, net_number, epochs)
print(f'Loading model from {restore_path_this_net}')
assayer = VOCAssayer.from_config(net_name=net_name,
num_classes=num_classes,
loss_func=loss_func,
testset=testset,
mode=mode,
embedding_n_out=embedding_n_out,
restore_path=restore_path_this_net,
batch_size=batch_size,
device=device,
num_workers=num_workers,
data_parallel=data_parallel)
results = assayer.assay()
# --- add columns to image + trial dataframes before appending to list
for key in ('images_df', 'trials_df'):
df = results[key]
df['net_name'] = net_name
df['replicate'] = net_number
df['mode'] = mode
df['method'] = method
df['loss_func'] = loss_func
df['restore_path'] = restore_path_this_net
df_lists[key].append(df)
# ---- add columns + metrics to our 'results across replicates' records for that data frame
assay_records['net_name'].append(net_name)
assay_records['replicate'].append(net_number)
assay_records['mode'].append(mode)
assay_records['method'].append(method)
assay_records['loss_func'].append(loss_func)
assay_records['restore_path'].append(restore_path_this_net)
for metric in ['acc', 'd_prime']:
assay_records[metric].append(results[metric])
results_str = ', '.join(
[f'{key}: {results[key]:7.3f}'
for key in ['acc', 'd_prime']]
)
print(f'assay results: {results_str}')
arrays_per_model[restore_path_this_net] = results['arrays']
# ---- create results dict, save to results.gz file
if not os.path.isdir(test_results_save_path):
os.makedirs(test_results_save_path)
results_fname_stem = str(Path(configfile).stem) # remove .ini extension
arrays_fname = os.path.join(test_results_save_path,
f'{results_fname_stem}_trained_{epochs}_epochs_assay_arrays.gz')
joblib.dump(arrays_per_model, arrays_fname)
summary_csv_fname = os.path.join(test_results_save_path,
f'{results_fname_stem}_trained_{epochs}_epochs_assay_results.csv')
results_df = pd.DataFrame.from_records(assay_records)
results_df.to_csv(summary_csv_fname, index=False)
for key, df_list in df_lists.items():
csv_fname = f"{results_fname_stem}_trained_{epochs}_epochs_assay_{key.replace('_df', '')}.csv"
csv_path = os.path.join(test_results_save_path, csv_fname)
df = | pd.concat(df_list) | pandas.concat |
#!/usr/bin/env python
"""Local and remote asteroid name resolution."""
import asyncio
import re
import warnings
import aiohttp
import nest_asyncio
import numpy as np
import pandas as pd
from rich.progress import Progress
import rocks
# Run asyncio nested for jupyter notebooks, GUIs, ...
nest_asyncio.apply()
def identify(id_, return_id=False, local=True, progress=False):
"""Resolve names and numbers of one or more minor bodies using identifiers.
Parameters
----------
id_ : str, int, float, list, range, set, np.ndarray, pd.Series
One or more identifying names or numbers to resolve.
return_id : bool
Return the SsODNet ID of the asteroid as third member of
the tuple. Default is False.
local : bool
Try resolving the name locally first. Default is True.
progress : bool
Show progress bar. Default is False.
Returns
-------
tuple, list of tuple : (str, int, str), (None, np.nan, None)
List containing len(id_) tuples. Each tuple contains the asteroid's
name, number, and the SsODNet ID if return_id=True. If the resolution
failed, the values are None for name and SsODNet and np.nan for the
number. If a single identifier is resolved, a tuple is returned.
Notes
-----
Name resolution is first attempted locally, then remotely via quaero. If
the asteroid is unnumbered, its number is np.nan.
"""
# ------
# Verify input
if isinstance(id_, (str, int, float)):
id_ = [id_]
elif isinstance(id_, pd.Series):
id_ = id_.to_list()
elif isinstance(id_, np.ndarray):
id_ = id_.tolist()
elif isinstance(id_, (set, range)):
id_ = list(id_)
elif id_ is None:
warnings.warn(f"Received id_ of type {type(id_)}.")
return (None, np.nan) if not return_id else (None, np.nan, None) # type: ignore
elif not isinstance(id_, (list, np.ndarray)):
raise TypeError(
f"Received id_ of type {type(id_)}, expected one of: "
f"str, int, float, list, set, range, np.ndarray, pd.Series"
)
if not id_:
warnings.warn("Received empty list of identifiers.")
return (None, np.nan) if not return_id else (None, np.nan, None) # type: ignore
# ------
# For a single name, try local lookup right away, async process has overhead
if len(id_) == 1:
success, (name, number, ssodnet_id) = _local_lookup(id_[0])
if success:
if not return_id:
return (name, number)
else:
return (name, number, ssodnet_id)
# ------
# Run asynchronous event loop for name resolution
with Progress(disable=not progress) as progress_bar:
task = progress_bar.add_task("Identifying rocks", total=len(id_)) # type: ignore
loop = asyncio.get_event_loop()
results = loop.run_until_complete(_identify(id_, local, progress_bar, task))
# ------
# Check if any failed due to 502 and rerun them
idx_failed = [
i for i, result in enumerate(results) if result == (None, None, None)
]
if idx_failed:
results = np.array(results)
results[idx_failed] = loop.run_until_complete(
_identify(np.array(id_)[idx_failed], local, progress_bar, task)
)
results = results.tolist()
# ------
# Verify the output format
if not return_id:
results = [r[:2] for r in results]
if len(id_) == 1: # type: ignore
results = results[0]
return results # type: ignore
async def _identify(id_, local, progress_bar, task):
"""Establish the asynchronous HTTP session and launch the name resolution."""
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout()) as session:
tasks = [
asyncio.ensure_future(_resolve(i, session, local, progress_bar, task))
for i in id_
]
results = await asyncio.gather(*tasks)
return results
async def _resolve(id_, session, local, progress_bar, task):
"""Resolve the identifier locally or remotely."""
if | pd.isnull(id_) | pandas.isnull |
import pandas as pd
import xarray as xr
import re
import numpy as np
import datetime as dt
class AWS:
'''This class represents an Automatic Weather Station and its time series'''
def __init__(self, name, code, lat, lon, elev):
self.name = name
self.code = code
self.lat = lat
self.lon = lon
self.elev = elev
self.atmvar = dict()
def add_atmvar(self, name, time_series_dataarray):
self.atmvar[name] = time_series_dataarray
class AWSWriter:
'''This class is responsible for saving a group of AWS as a .csv file'''
pass
class AWSWiscReader:
'''This class reads an AWS from a .txt file from wisc data'''
def read_aws(self, filepath):
aws = self.read_metadata(filepath)
da = self.read_time_series(filepath)
aws.add_atmvar('T2m', da)
return aws
def read_metadata(self, filepath):
with open(filepath) as f:
firstline = f.readline().rstrip()
first_match_obj = re.match( r'Year: (.*) Month: (.*) ID: (.*) ARGOS: (.*) Name: (.*)', firstline)
secondline = f.readline().rstrip()
second_match_obj = re.match( r'Lat: (.*) Lon: (.*) Elev: (.*)', secondline)
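# The two header lines are therefore expected to look roughly like this
# (station name and numbers are illustrative only):
#   Year: 2020 Month: 01 ID: 8911 ARGOS: 8911 Name: Byrd
#   Lat: -80.01 Lon: -119.40 Elev: 1530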
return AWS( first_match_obj.group(5).strip(),
first_match_obj.group(3).strip(),
second_match_obj.group(1).strip(),
second_match_obj.group(2).strip(),
second_match_obj.group(3).strip(),
)
def read_time_series(self, filepath):
df = | pd.read_csv(filepath, skiprows=2, header=None, sep='\s+', na_values=444.0) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""ml_package_code.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1slXIOwuXRSfAQpsAAsGs1DIkqMU3hHYZ
"""
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from sklearn.impute import SimpleImputer
from keras.models import Sequential
from keras.layers import Dense
import seaborn as sns
import matplotlib.pyplot as plt
def read_dataset(param):
y_var = param[1]
path = param[0]
ind = param[2]
if ind ==' ':
df = pd.read_csv(path)
y = df[y_var]
x = df.drop(columns=y_var)
print(df.head())
return df,x,y
else:
df = pd.read_csv(path,index_col=ind)
y = df[y_var]
x = df.drop(columns=y_var)
print(df.head())
return df,x,y
def one_hot(x,y,cat=False,exc=False,onlyoutcome=False,onlyx=False):
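# Summary of the mode flags, inferred from the branches below:
#   cat == '~'          : one-hot encode every column of x and the outcome y
#   exc == True         : one-hot encode every column of x except those listed in cat
#   onlyoutcome == True : one-hot encode only y and return just y
#   onlyx == True       : one-hot encode every column of x and return just x
#   otherwise           : one-hot encode only the columns listed in cat, plus y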
if cat =='~':
for j in x.columns:
onehot = pd.get_dummies(x[j],prefix=j)
x = x.drop(columns=[j])
x = pd.concat([x,onehot],axis=1)
y = pd.get_dummies(y)
return x,y
elif exc == True:
col = x.columns
col1 = list(col)
for i in cat:
col1.remove(i)
for j in col1:
onehot = pd.get_dummies(x[j],prefix=j)
x = x.drop(columns=[j])
x = pd.concat([x,onehot],axis=1)
y = pd.get_dummies(y)
return x,y
elif onlyoutcome == True:
y = pd.get_dummies(y)
return y
elif onlyx == True:
for j in x.columns:
onehot = pd.get_dummies(x[j],prefix=j)
x = x.drop(columns=[j])
x = pd.concat([x,onehot],axis=1)
return x
else:
print('----')
for i in cat:
onehot = pd.get_dummies(x[i],prefix=i)
x = x.drop(columns=[i])
x = pd.concat([x,onehot],axis=1)
y = | pd.get_dummies(y) | pandas.get_dummies |
"""Performs growth and exchange analysis for several models."""
from cobra.util.solver import interface_to_str, OptimizationError
from micom import load_pickle
from micom.annotation import annotate_metabolites_from_exchanges
from micom.logger import logger
from micom.media import minimal_medium
from micom.workflows.core import workflow, GrowthResults
from micom.workflows.media import process_medium
from os import path
import pandas as pd
DIRECTION = pd.Series(["import", "export"], index=[0, 1])
ARGS = {
"none": {"fluxes": True, "pfba": False},
"minimal imports": {"fluxes": False, "pfba": False},
"pFBA": {"fluxes": True, "pfba": True},
}
def _growth(args):
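# args is the per-sample tuple assembled in `grow`:
# (model pickle path, tradeoff, medium fluxes, weights, strategy, atol, rtol, presolve)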
p, tradeoff, medium, weights, strategy, atol, rtol, presolve = args
com = load_pickle(p)
if atol is None:
atol = com.solver.configuration.tolerances.feasibility
if rtol is None:
rtol = com.solver.configuration.tolerances.feasibility
if presolve:
# this looks redundant, but it is needed to respect the configured preset
# as well as the solver's "auto" presolve setting
com.solver.configuration.presolve = presolve
if "glpk" in interface_to_str(com.solver.interface):
logger.error(
"Community models were not built with a QP-capable solver. "
"This means that you did not install CPLEX or Gurobi. "
"If you did install one of the two please file a bug report "
"at https://github.com/micom-dev/micom/issues."
)
return None
ex_ids = [r.id for r in com.exchanges]
logger.info(
"%d/%d import reactions found in model.",
medium.index.isin(ex_ids).sum(),
len(medium),
)
com.medium = medium[medium.index.isin(ex_ids)]
# Get growth rates
args = ARGS[strategy].copy()
args["atol"] = atol
args["rtol"] = rtol
args["fraction"] = tradeoff
try:
sol = com.cooperative_tradeoff(**args)
rates = sol.members
rates["taxon"] = rates.index
rates["tradeoff"] = tradeoff
rates["sample_id"] = com.id
except Exception:
logger.error(
"Could not solve cooperative tradeoff for %s. "
"This can often be fixed by enabling `presolve`, choosing more "
"permissive atol and rtol arguments, or by checking that medium "
"fluxes are > atol." % com.id
)
return None
if strategy == "minimal imports":
# Get the minimal medium and the solution at the same time
med = minimal_medium(
com,
exchanges=None,
community_growth=sol.growth_rate,
min_growth=rates.growth_rate.drop("medium"),
solution=True,
weights=weights,
atol=atol,
rtol=rtol,
)
if med is None:
logger.error(
"The minimal medium optimization failed for %s. "
"This can often be fixed by enabling `presolve`, choosing more "
"permissive atol and rtol arguments, or by checking that medium "
"fluxes are > atol." % com.id
)
return None
sol = med["solution"]
exs = list({r.global_id for r in com.internal_exchanges + com.exchanges})
fluxes = sol.fluxes.loc[:, exs].copy()
fluxes["sample_id"] = com.id
fluxes["tolerance"] = atol
anns = annotate_metabolites_from_exchanges(com)
return {"growth": rates, "exchanges": fluxes, "annotations": anns}
def grow(
manifest,
model_folder,
medium,
tradeoff,
threads=1,
weights=None,
strategy="minimal imports",
atol=None,
rtol=None,
presolve=False,
):
"""Simulate growth for a set of community models.
Note
----
The strategy `minimal imports` can become unstable for common carbon sources since
it will add in infeasible imports that are very small but import some high-C
molecules. If you use it check that only components from your medium have been used
and molecules that should be essential are indeed consumed.
Parameters
----------
manifest : pandas.DataFrame
The manifest as returned by the `build` workflow.
model_folder : str
The folder in which to find the files mentioned in the manifest.
medium : pandas.DataFrame
A growth medium. Must have columns "reaction" and "flux" denoting
exchange reactions and their respective maximum flux.
tradeoff : float in (0.0, 1.0]
A tradeoff value. Can be chosen by running the `tradeoff` workflow or
by experience. Tradeoff values of 0.5 for metagenomics data and 0.3 for
16S data seem to work well.
threads : int >=1
The number of parallel workers to use when building models. As a
rule of thumb you will need around 1GB of RAM for each thread.
strategy : str
Computational strategy used to reduce the flux space. "pFBA" uses
parsimonious FBA, "minimal imports" (the default) uses the solution with the
smallest total mass import from the environment, and "none" returns an
arbitrary feasible flux distribution.
weights : str
Only used during the calculation of the minimal import rates.
Will scale the fluxes by a weight factor. Can either be "mass", which will
scale by molecular mass, or a single element, which will scale by
the elemental content (for instance "C" to scale by carbon content).
If None every metabolite will receive the same weight.
Will be ignored if `minimize_components` is True.
atol : float
Absolute tolerance for the growth rates. If None will use the solver tolerance.
rtol : float
Relative tolerqance for the growth rates. If None will use the solver tolerance.
presolve : bool
Whether to use the presolver/scaling. Can improve numerical accuracy in some
cases.
Returns
-------
GrowthResults
A named tuple containing the growth rates and exchange fluxes for all
samples/models.
"""
if strategy not in ARGS:
raise ValueError(
"`%s` is not a valid strategy. Must be one of %s!"
% (strategy, ", ".join(ARGS))
)
samples = manifest.sample_id.unique()
paths = {
s: path.join(model_folder, manifest[manifest.sample_id == s].file.iloc[0])
for s in samples
}
medium = process_medium(medium, samples)
args = [
[
p,
tradeoff,
medium.flux[medium.sample_id == s],
weights,
strategy,
atol,
rtol,
presolve,
]
for s, p in paths.items()
]
results = workflow(_growth, args, threads)
if all([r is None for r in results]):
raise OptimizationError(
"All numerical optimizations failed. This indicates a problem "
"with the solver or numerical instabilities. Check that you have "
"CPLEX or Gurobi installed. You may also increase the abundance "
"cutoff to create simpler models."
)
growth = | pd.concat(r["growth"] for r in results if r is not None) | pandas.concat |
# import libraries
import glob
import os
from collections import OrderedDict
from pathlib import Path
import cv2
import face_recognition
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
def wget_video(
name,
url,
cmd="youtube-dl --continue --write-auto-sub --get-thumbnail --write-all-thumbnails --get-description --all-subs -o {} {}",
dir_out=None,
):
"""
Fetch video from youtube
:param dir_out: directory to save to - if directory does not exist, then sets to save in current directory,
:param name: identifier to name video with
:param url: youtube URL to download as MP4
:param cmd: command line call (note: str.format() assumes 2 placeholders)
:return: True if successfully downloaded; else, return False.
"""
if not Path(dir_out).is_dir():
dir_out = ""
try:
if not glob.glob(dir_out + name + "/*.mp4") + glob.glob(
dir_out + name + "/*.mkv"
):
os.system(cmd.format(dir_out + name, url))
return True
finally:
print(name)
return False
def encode_face_files(imfiles):
images = OrderedDict({impath: cv2.imread(impath)[:, :, ::-1] for impath in imfiles})
encodings = {}
for impath, image in images.items():
try:
encodings[impath] = face_recognition.face_encodings(image)[0]
except Exception as e:
print(f"Error encoding {impath} {e.message}")
return encodings
def read_family_member_list(f_csv):
df = pd.read_csv(f_csv)
# df['last'] = df["surname"].apply(lambda x: x.split('.')[0])
df["ref"] = df["firstname"] + "_" + df["surname"]
df = df.loc[df.video.notna()]
df.reset_index(inplace=True)
del df["index"]
return df
def fetch_videos(df, dir_out=None):
df.apply(lambda x: wget_video(x["ref"], x["video"], dir_out=dir_out), axis=1)
def encode_mids(d_mid, f_encodings=None, save_pickle=False):
if f_encodings and Path(f_encodings).is_file():
encodings = pd.read_pickle(f_encodings)
else:
impaths = glob.glob(f"{d_mid}/*.jpg")
encodings = encode_face_files(impaths)
if save_pickle:
f_encodings = f"{d_mid}/encodings.pkl"
| pd.to_pickle(encodings, f_encodings) | pandas.to_pickle |
#!/usr/bin/python
# coding:utf8
# return all stocks information include price and voloum, excluding ST and risk notification stocks
import pandas as pd
import easyquotation
def get_prices():
quotation = easyquotation.use('qq')
stocks_info = quotation.all
return stocks_info
def stockFilter(stock):
head = stock[:1]
return head == "0" or head == "3" or head == "6"
def select():
stocks_info = get_prices()
df = pd.DataFrame(stocks_info).T
print(df.columns)
to_drop = list(df.columns)
for i in range(1,6):
to_drop.remove("ask" + str(i))
to_drop.remove("bid" + str(i))
to_drop.remove("close")
to_drop.remove("turnover")
to_drop.remove("unknown")
to_drop.remove("涨停价")
to_drop.remove("涨跌(%)")
to_drop.remove("跌停价")
to_drop.remove("总市值")
df = df.drop( | pd.Index(to_drop) | pandas.Index |
from repofish.database import get_available_modules, load_module
from repofish.github import search_code
import pickle
import time
import pandas
access_token = "<PASSWORD>"
# Read in list of Github repos
repos_file = "/scratch/PI/russpold/data/PUBMED/repos/repos_github_pandas_df.pkl"
repos = pickle.load(open(repos_file,"rb"))
# Get available modules
modules = get_available_modules()
# An example of running locally
# repos 9935
matches = | pandas.DataFrame() | pandas.DataFrame |
# Import requirements; Note sqlalchemy
import re
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
import sys
def load_data(messages_filepath, categories_filepath):
'''
Description: Load/Extract data from the CSV files into pandas DataFrames
Args: file paths of the messages and categories CSV files
Return: Returns DataFrame/data-matrix
'''
df_messages = pd.read_csv(messages_filepath)
df_categories = | pd.read_csv(categories_filepath) | pandas.read_csv |
#!/usr/bin/python
import importlib
# Add page files here
import Grapher
import MainPage
import NewPredictionPage
import AddModelPage
import DevToolsPage
import DNNPage
from pathlib import Path
import tkinter as tk
from tkinter import ttk
import pandas
import os
import Model
LARGE_FONT = ("Verdana", 12, "bold")
SMALL_FONT = ("Verdana", 10)
datafolder = Path("Data/Tickers")
# Base of the user interface; calls pages to be used from frames.
class UserInterface(tk.Tk):
# Easier for reading
def __init__(self, *args, **kwargs):
#======================= Creating the window =====================
super().__init__(*args, **kwargs)
#tk.Tk.iconbitmap(self, default = "Zoltar_Icon.ico")
tk.Tk.wm_title(self, "Zoltar")
# Container = window seen
container = tk.Frame(self)
container.pack(side = "top", fill = "both", expand = True)
container.grid_rowconfigure(0, weight = 1)
container.grid_rowconfigure(1, weight = 1)
container.grid_columnconfigure(0, weight = 3) # Weights the graph to be larger
container.grid_columnconfigure(1, weight = 1)
# Frame configuration: loop runs through right-side frames
self.frames = {}
# Add all right-side frames to this loop
for F in (MainPage.MainWindow, NewPredictionPage.NewPredictionWindow, AddModelPage.AddModelWindow,
DevToolsPage.DevToolsWindow):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row = 0, column = 1, sticky = "nsew")
frame = DNNPage.DNNWindow(container, self)
self.frames[DNNPage.DNNWindow] = frame
frame.grid(row = 1, column = 1, sticky = "nsew")
# Only 1 Left-side frame, but same function as loop
frame = Grapher.GrapherWindow(container, self)
self.frames[Grapher.GrapherWindow] = frame
frame.grid(row = 0, rowspan = 2, column = 0, sticky = "nsew")
self.showFrame(MainPage.MainWindow) # Initial page to show
#====================== Data Handling Methods ======================
# Needs list: report available csv, append Model Results, get stock name,
# Get CSV from Stock name
def saveNewPrediction(self):
#============================== Collecting Variables ===============================
frame = self.frames[NewPredictionPage.NewPredictionWindow]
stockNames = frame.getCurrentlySelectedStocks()
startDate = frame.getStartDate()
endDate = frame.getEndDate()
predictionName = frame.getName()
predictionWindow = frame.getPredictionWindow()
print("someting")
print(predictionWindow)
path = "Data" + os.sep + "Saved_Stock_Data" + os.sep + predictionName + ".csv"
#=============================== Generating File ==================================
allData = | pandas.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import pmdarima as pm
from json import dumps, loads
from numpy.linalg import inv
from datetime import timedelta
from sympy import Symbol, Poly, solve_poly_system
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.statespace.tools import diff
def process(file, ext, log, H, C, Y, toggle, P, D, Q):
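# Parameter meanings inferred from how they are used below; a best-effort
# reading rather than an authoritative spec:
#   file, ext : uploaded time-series file and its extension (.csv or Excel)
#   log       : 'true' log-transforms the series before modelling
#   H         : forecast horizon (number of periods ahead)
#   C, Y      : constraint inputs passed through to restricted_forecast
#   toggle    : 'on' selects pmdarima auto_arima; otherwise a manual ARIMA(P, D, Q) fit
#   P, D, Q   : ARIMA order used when toggle is not 'on'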
if ext == '.csv':
df = pd.read_csv(file)
else:
df = pd.read_excel(file)
df = df[df.columns[0]]
if log == 'true':
df = np.log(df).replace(-np.inf, 0)
Zt = df.to_json(orient='values')
t = dumps([x for x in range(len(df))])
if toggle == 'on':
_, Zh, th, Zh_l, Zh_u, summary = forecast(df, H)
else:
_, Zh, th, Zh_l, Zh_u, summary = forecastARIMA(df, H, P, D, Q)
Zh_res = restricted_forecast(
Zh, H, C, Y, summary['order'], summary['params'])
data = {'Z': Zt, 't': t, 'Zh': Zh, 'th': th, 'Zh_l': Zh_l, 'Zh_u': Zh_u,
'Zh_res': Zh_res, 'summary': dumps(summary)}
return dumps(data)
def forecast(df, H):
model = pm.auto_arima(df.values[:], start_p=2, start_q=2,
test='adf', # use adftest to find optimal 'd'
max_p=5, max_q=5, # maximum p and q
m=1, # frequency of series
d=None, # let model determine 'd'
seasonal=False, # No Seasonality
start_P=0,
D=0,
trend=None,
with_intercept=False,
trace=False,
error_action='ignore',
suppress_warnings=True,
stepwise=True)
# Forecast
n_periods = H
fc, confint = model.predict(n_periods=n_periods, return_conf_int=True)
index_of_fc = pd.RangeIndex(start=df.index.stop, stop=df.index.stop + H)
th = dumps([x for x in range(len(df), len(df)+H)])
# index_of_fc = pd.PeriodIndex((pd.to_datetime(df.values[:,0]) + H*timedelta(weeks=12))[-H:],freq='Q')
fc_series = pd.Series(fc, index=index_of_fc).to_json(orient='values')
lower_series = pd.Series(
confint[:, 0], index=index_of_fc).to_json(orient='values')
upper_series = pd.Series(
confint[:, 1], index=index_of_fc).to_json(orient='values')
model_params = model.to_dict()
summary = {'order': model_params['order'], 'params': model_params['params'].tolist(
), 'summary': model.summary().as_html()}
try:
pred = model.predict(df.index.stop,
df.index.stop + H - 1,
typ='linear').tolist()
except Exception:
pred = model.predict(df.index.stop,
df.index.stop + H - 1).tolist()
pred_series = pd.Series(pred).to_json(orient='values')
return pred_series, fc_series, th, lower_series, upper_series, summary
def forecastARIMA(df, H, P, D, Q):
order = (P, D, Q)
model = ARIMA(df.values[:], order=order).fit(trend='nc')
# Forecast
n_periods = H
fc, se, confint = model.forecast(n_periods)
index_of_fc = pd.RangeIndex(start=df.index.stop, stop=df.index.stop+H)
th = dumps([x for x in range(len(df), len(df)+H)])
# index_of_fc = pd.PeriodIndex((pd.to_datetime(df.values[:,0]) + H*timedelta(weeks=12))[-H:],freq='Q')
fc_series = pd.Series(fc, index=index_of_fc).to_json(orient='values')
lower_series = pd.Series(
confint[:, 0], index=index_of_fc).to_json(orient='values')
upper_series = pd.Series(
confint[:, 1], index=index_of_fc).to_json(orient='values')
summary = {'order': order,
'params': list(model.arparams)+list(model.maparams),
'summary': model.summary().as_html()}
try:
pred = model.predict(df.index.stop,
df.index.stop + H - 1,
typ='linear').tolist()
except Exception:
pred = model.predict(df.index.stop,
df.index.stop + H - 1).tolist()
pred_series = | pd.Series(pred) | pandas.Series |
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.metrics import R2
from etna.models import LinearMultiSegmentModel
from etna.transforms import MeanSegmentEncoderTransform
from etna.transforms import SegmentEncoderTransform
@pytest.fixture
def dummy_df() -> pd.DataFrame:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = 1
df_2["segment"] = "Omsk"
df_2["target"] = 2
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = classic_df.pivot(index="timestamp", columns="segment")
df = df.reorder_levels([1, 0], axis=1)
df = df.sort_index(axis=1)
df.columns.names = ["segment", "feature"]
return df
def test_dummy(dummy_df):
transform = SegmentEncoderTransform()
transformed_df = transform.fit_transform(dummy_df)
assert (
len(transformed_df.loc[:, pd.IndexSlice[:, "regressor_segment_code"]].columns) == 2
), "Number of columns not the same as segments"
assert len(dummy_df) == len(transformed_df), "Row missing"
@pytest.fixture
def simple_df() -> pd.DataFrame:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-06-07", freq="D")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-06-07", freq="D")})
df_1["segment"] = "Moscow"
df_1["target"] = [1.0, 2.0, 3.0, 4.0, 5.0, np.NAN, np.NAN]
df_1["exog"] = [6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]
df_2["segment"] = "Omsk"
df_2["target"] = [10.0, 20.0, 30.0, 40.0, 50.0, np.NAN, np.NAN]
df_2["exog"] = [60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
return df
@pytest.fixture
def transformed_simple_df() -> pd.DataFrame:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-06-07", freq="D")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-06-01", "2021-06-07", freq="D")})
df_1["segment"] = "Moscow"
df_1["target"] = [1.0, 2.0, 3.0, 4.0, 5.0, np.NAN, np.NAN]
df_1["exog"] = [6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0]
df_1["regressor_segment_mean"] = [0, 1, 1.5, 2, 2.5, 3, 3]
df_2["segment"] = "Omsk"
df_2["target"] = [10.0, 20.0, 30.0, 40.0, 50.0, np.NAN, np.NAN]
df_2["exog"] = [60.0, 70.0, 80.0, 90.0, 100.0, 110.0, 120.0]
df_2["regressor_segment_mean"] = [0.0, 10.0, 15.0, 20.0, 25.0, 30, 30]
    classic_df = pd.concat([df_1, df_2], ignore_index=True)
"""This file contains functions and tests to calculate the stylized facts"""
import pandas as pd
import numpy as np
from math import isclose
from stockmarket.functions import div0
# return autocorrelation close to zero after lag 1
# calculate returns
def calculate_close(orderbook_transaction_price_history):
closing_prices = []
for day in orderbook_transaction_price_history:
closing_prices.append(day[-1])
close = pd.Series(closing_prices).pct_change()
return close
def calculate_returns(orderbook_transaction_price_history):
"""Return the returns"""
closing_prices = []
for day in orderbook_transaction_price_history:
closing_prices.append(day[-1])
returns = pd.Series(closing_prices).pct_change()
return returns[1:]
# Test 1
def zero_autocorrelation(returns, lags):
"""returns wether average autocorrelation is much different from zero"""
autocorr_returns = [returns.autocorr(lag=lag) for lag in range(lags)]
    # check whether the mean autocorrelation lies between -0.1 and 0.1
average_autocorrelation = np.mean(autocorr_returns[1:])
if (average_autocorrelation < 0.1) and (average_autocorrelation > -0.1):
return True, average_autocorrelation
else:
return False, np.inf
# # Test 2
# def fat_tails(returns):
# results = powerlaw.Fit(returns)
# alpha = results.power_law.alpha
# #print(alpha)
# if (alpha < 5) and (alpha > 3):
# return True, alpha
# else:
# return False, np.inf
def fat_tails_kurtosis(returns):
series_returns = pd.Series(returns)
kurt = series_returns.kurtosis()
if kurt > 4:
return True, kurt
else:
return False, np.inf
# Test 3
def clustered_volatility(returns, lags):
absolute_returns = returns.abs()
autocorr_abs_returns = [absolute_returns.autocorr(lag=lag) for lag in range(lags)]
average_autocorrelation = np.mean(autocorr_abs_returns[1:])
#print(average_autocorrelation)
if (average_autocorrelation < 0.1) and (average_autocorrelation > -0.1):
return False, np.inf
else:
return True, average_autocorrelation
# Test 4
def long_memory(returns, hurst_function, lag1, lag2):
h = hurst_function(returns, lag1, lag2)
#print('h = ', h)
return not isclose(0.5, h, abs_tol=(10 ** -1 / 2)), h
# functions to calculate stylized facts
def autocorrelation_returns(returns, lags):
"""
Calculate the average autocorrelation in a returns time series
:param returns: time series of returns
:param lags: the lags over which the autocorrelation is to be calculated
:return: average autocorrelation
"""
    returns = pd.Series(returns)
    autocorr_returns = [returns.autocorr(lag=lag) for lag in range(lags)]
    average_autocorrelation = np.mean(autocorr_returns[1:])
    return average_autocorrelation
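# --- Hedged illustration (added): the lag-autocorrelation pattern used above on a toy series ---
# A strongly trending series keeps an autocorrelation near 1 at every lag, so the average
# over lags 1..4 is far from zero and would fail the zero_autocorrelation check.
if __name__ == "__main__":
    _trend = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])
    _autocorrs = [_trend.autocorr(lag=lag) for lag in range(5)]
    print(np.mean(_autocorrs[1:]))  # 1.0 for a perfectly linear series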
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.models import Sequential
from keras.layers import LSTM,Dense,Dropout
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.optimizers import Adam
from sklearn.preprocessing import MinMaxScaler
from statistics import mean
from numpy import newaxis
import csv
import argparse
# Arguments
epochs = 100
batch_size = 32
past_day = 5
future_day = 1
# Use the closing price as the training feature and the opening price as the target label
def split_dataset(df, past_day, future_day):
X, Y = [], []
for i in range(len(df) - future_day - past_day):
X.append(np.array(df[i:i+past_day, 0]))
Y.append(np.array(df[i+past_day:i+past_day+future_day, 0]))
return np.array(X), np.array(Y)
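# --- Hedged illustration (added): what split_dataset produces on a toy price column ---
# With past_day=2 and future_day=1 on six prices there are 6 - 1 - 2 = 3 windows:
# X holds the overlapping 2-step histories and Y the value that follows each window.
if __name__ == "__main__":
    _demo = np.arange(6, dtype=float).reshape(-1, 1)  # prices 0..5 as a single column
    _X_demo, _Y_demo = split_dataset(_demo, 2, 1)
    print(_X_demo.shape, _Y_demo.shape)  # (3, 2) (3, 1)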
def build_model(shape):
model = Sequential()
model.add(LSTM(64, input_shape=(shape[1], shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.2))
model.add(Dense(1))
return model
def plotting(input1, input2, title, legend, x_label=None, y_label=None, grid=True, figsize=(20, 8)):
plt.figure(figsize=figsize)
plt.plot(input1)
plt.plot(input2)
plt.title(title)
plt.legend(legend)
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.grid(grid)
plt.show()
def calculate_revenue(size, nu):
# 1: buy
# 0: hold
# -1: short/sell
# size = x_test.shape[0] - 1
status = 0
flag = 0
revenue = 0
with open('output.csv', mode='w') as csv_file:
writer = csv.writer(csv_file)
for i in range(size):
if (status == 1):
if (nu[i+1]<nu[i]):
writer.writerow(['-1'])
status = 0
revenue = revenue+nu[i]
else:
writer.writerow(['0'])
elif (status == 0):
if (nu[i+1]>nu[i]):
writer.writerow(['1'])
status = 1
revenue = revenue-nu[i]
elif (nu[i+1]<nu[i]):
writer.writerow(['-1'])
status = -1
revenue = revenue+nu[i]
else:
writer.writerow(['0'])
else :
if (nu[i+1]>nu[i]):
writer.writerow(['1'])
status = 0
revenue = revenue-nu[i]
else:
writer.writerow(['0'])
if (status==1) :
revenue = revenue + nu[size]
elif (status==-1) :
revenue = revenue - nu[size]
return revenue
if __name__ == "__main__":
# Main Arguments
main_path = '';
parser = argparse.ArgumentParser()
parser.add_argument("--training", default="dataset/training.csv", help="input training data file name")
parser.add_argument("--testing", default="dataset/testing.csv", help="input testing data file name")
parser.add_argument("--output", default="output.csv", help="output file name")
args = parser.parse_args()
train_df = pd.read_csv(args.training, header=None)
test_df = pd.read_csv(args.testing, header=None)
train_df.drop([1,2,3], inplace=True, axis=1)
test_df.drop([1,2,3], inplace=True, axis=1)
test_df = pd.DataFrame(np.insert(test_df.to_numpy(), 0, train_df.to_numpy()[-(past_day+1):], axis=0))
train_df = pd.DataFrame(train_df.to_numpy()[:-(past_day+1)])
# Scaling
sc = MinMaxScaler(feature_range=(-1, 1))
scaled_train_df = sc.fit_transform(train_df)
scaled_test_df = sc.transform(test_df)
# Generate training data and label
x_train, y_train = split_dataset(scaled_train_df, past_day, future_day)
x_test, y_test = split_dataset(scaled_test_df, past_day, future_day)
# Plotting Original Open and Close Price of testing.csv
plotting(x_test[:,-1], y_test, 'Price', ['close price','open price'])
# Reshape the data into (Samples, Timestep, Features)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], 1)
# Build model
model = build_model(x_train.shape)
model.summary()
# Compile and Fit
reduce_lr = tf.keras.callbacks.LearningRateScheduler(lambda x: 1e-3 * 0.9 ** x)
early_stopping = EarlyStopping(monitor='loss', patience=10, verbose=1, mode='auto')
model.compile(optimizer=Adam(), loss='mean_squared_error')
history = model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size,validation_data=(x_test, y_test), shuffle=False, callbacks=[reduce_lr, early_stopping])
model.save('model.h5')
# Plotting Model Loss
plotting(input1=history.history['loss'], input2=history.history['val_loss'], title='Model Loss', legend=['Train','Valid'], x_label='Epochs', y_label='Loss')
# Start Predicting
predicted = model.predict(x_test)
predict = sc.inverse_transform(predicted.reshape(predicted.shape[0], predicted.shape[1]))
    # Shift the prediction data forward by three days and use the predicted results to forecast the final three days of the stock
last = np.array([x_test[-1, 1:], x_test[-1, 2:], x_test[-1, 3:]], dtype=object)
# print(last)
# print(np.array(predicted[newaxis, -2, -1]))
last[0] = np.array(np.concatenate((np.array(last[0]), np.array(predicted[newaxis, -3, -1]))))
last[1] = np.concatenate((last[1], predicted[newaxis, -3, -1]))
last[1] = np.array(np.concatenate((last[1], predicted[newaxis, -2, -1])))
last[2] = np.concatenate((last[2], predicted[newaxis, -3, -1]))
last[2] = np.concatenate((last[2], predicted[newaxis, -2, -1]))
last[2] = np.array(np.concatenate((last[2], predicted[newaxis, -1, -1])))
last[0] = pd.DataFrame(last[0])
last[1] = pd.DataFrame(last[1])
    last[2] = pd.DataFrame(last[2])
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 14:41:52 2017
@author: mazeller
"""
#Imports
import pandas as pd
import numpy as np
import sklearn
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import VotingRegressor
from sklearn.datasets import make_regression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, explained_variance_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score
class featureRegression:
def __init__(self, fileTrainingSet):
"""Creates features from alignments and antigenic distances"""
self.__fileTrainingSet = fileTrainingSet
self.__loadTrainingSet()
def __loadTrainingSet(self):
        df = pd.read_csv(self.__fileTrainingSet, header=0, index_col=[0])
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from validada.slicers import iloc
import validada.functions.raising as ck
import validada.decorators.raising as dc
import datetime as dt
def _add_one(df):
return df + 1
def _safe_add_one(df):
return df.fillna(0.0) + 1
def _noop(df):
return df
def test_is_in_index():
dr = pd.date_range(start='2015-01-01', periods=6, freq='D')
df = pd.DataFrame(data = list(range(6)), index=dr)
d = dt.date(2015,1,3)
result = ck.has_in_index(df, obj=d)
tm.assert_frame_equal(df, result)
result = dc.has_in_index(obj=d)(_add_one)(df)
    tm.assert_frame_equal(result, df + 1)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Read the data with pandas
AAPL = pd.read_csv("AAPL.csv")
#print(AAPL.head())
# Short-term simple moving average
SMA30 = pd.DataFrame()  # new dataframe to hold the moving average
SMA30['Deger'] = AAPL['Close'].rolling(window=20).mean()  # rolling mean of the close over the last 20 days
# Longer-term simple moving average
SMA100 = pd.DataFrame()
SMA100['Deger'] = AAPL['Close'].rolling(window=200).mean()  # rolling mean of the close over the last 200 days
# Collect everything into a single dataframe
veriseti = pd.DataFrame()
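# --- Hedged illustration (added): how rolling(window=n).mean() behaves on a short series ---
# The first n-1 rows are NaN because a full window is not yet available; afterwards each
# row is the trailing n-day average of the close.
print(pd.Series([10.0, 11.0, 12.0, 13.0]).rolling(window=2).mean().tolist())  # [nan, 10.5, 11.5, 12.5]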
class Pywedge_Charts():
'''
Makes 8 different types of interactive Charts with interactive axis selection widgets in a single line of code for the given dataset.
Different types of Charts viz,
1. Scatter Plot
2. Pie Chart
3. Bar Plot
4. Violin Plot
5. Box Plot
6. Distribution Plot
7. Histogram
8. Correlation Plot
Inputs:
1. Dataframe
        2. c = any redundant column to be removed (e.g. an ID column; currently only a single column can be removed, support for multiple columns is planned for a later version)
3. y = target column name as a string
Returns:
Charts widget
'''
def __init__(self, train, c, y, manual=True):
self.train = train
self.c = c
self.y = y
self.X = self.train.drop(self.y,1)
self.manual = manual
def make_charts(self):
import pandas as pd
import ipywidgets as widgets
import plotly.express as px
import plotly.figure_factory as ff
import plotly.offline as pyo
from ipywidgets import HBox, VBox, Button
from ipywidgets import interact, interact_manual, interactive
import plotly.graph_objects as go
from plotly.offline import iplot
header = widgets.HTML(value="<h2>Pywedge Make_Charts </h2>")
display(header)
if len(self.train) > 500:
from sklearn.model_selection import train_test_split
test_size = 500/len(self.train)
if self.c!=None:
data = self.X.drop(self.c,1)
else:
data = self.X
target = self.train[self.y]
X_train, X_test, y_train, y_test = train_test_split(data, target, test_size=test_size, random_state=1)
train_mc = pd.concat([X_test, y_test], axis=1)
else:
train_mc = self.train
train_numeric = train_mc.select_dtypes('number')
train_cat = train_mc.select_dtypes(exclude='number')
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
out4 = widgets.Output()
out5 = widgets.Output()
out6 = widgets.Output()
out7 = widgets.Output()
out8 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3, out4, out5, out6, out7, out8])
tab.set_title(0, 'Scatter Plot')
tab.set_title(1, 'Pie Chart')
tab.set_title(2, 'Bar Plot')
tab.set_title(3, 'Violin Plot')
tab.set_title(4, 'Box Plot')
tab.set_title(5, 'Distribution Plot')
tab.set_title(6, 'Histogram')
tab.set_title(7, 'Correlation plot')
display(tab)
with out1:
header = widgets.HTML(value="<h1>Scatter Plots </h1>")
display(header)
x = widgets.Dropdown(options=list(train_mc.select_dtypes('number').columns))
def scatter_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes('number').columns)):
fig = go.FigureWidget(data=go.Scatter(x=train_mc[X_Axis],
y=train_mc[Y_Axis],
mode='markers',
text=list(train_cat),
marker_color=train_mc[Color]))
fig.update_layout(title=f'{Y_Axis.title()} vs {X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig.show()
widgets.interact_manual.opts['manual_name'] = 'Make_Chart'
one = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(scatter_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out2:
header = widgets.HTML(value="<h1>Pie Charts </h1>")
display(header)
def pie_chart(Labels=list(train_mc.select_dtypes(exclude='number').columns),
Values=list(train_mc.select_dtypes('number').columns)[0:]):
fig = go.FigureWidget(data=[go.Pie(labels=train_mc[Labels], values=train_mc[Values])])
fig.update_layout(title=f'{Values.title()} vs {Labels.title()}',
autosize=False,width=500,height=500)
fig.show()
one = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(pie_chart, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out3:
header = widgets.HTML(value="<h1>Bar Plots </h1>")
display(header)
def bar_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig1 = px.bar(train_mc, x=train_mc[X_Axis], y=train_mc[Y_Axis], color=train_mc[Color])
fig1.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig1.show()
one = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(bar_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out4:
header = widgets.HTML(value="<h1>Violin Plots </h1>")
display(header)
def viol_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.violin(train_mc, X_Axis, Y_Axis, Color, box=True, hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(viol_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out5:
header = widgets.HTML(value="<h1>Box Plots </h1>")
display(header)
def box_plot(X_Axis=list(train_mc.select_dtypes(exclude='number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[0:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig4 = px.box(train_mc, x=X_Axis, y=Y_Axis, color=Color, points="all")
fig4.update_layout(barmode='group',
title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
yaxis_title=f'{Y_Axis.title()}',
autosize=False,width=600,height=600)
fig4.show()
one = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(box_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out6:
header = widgets.HTML(value="<h1>Distribution Plots </h1>")
display(header)
def dist_plot(X_Axis=list(train_mc.select_dtypes('number').columns),
Y_Axis=list(train_mc.select_dtypes('number').columns)[1:],
Color=list(train_mc.select_dtypes(exclude='number').columns)):
fig2 = px.histogram(train_mc, X_Axis, Y_Axis, Color, marginal='violin', hover_data=train_mc.columns)
fig2.update_layout(title=f'{X_Axis.title()} vs {Y_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(dist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out7:
header = widgets.HTML(value="<h1>Histogram </h1>")
display(header)
def hist_plot(X_Axis=list(train_mc.columns)):
fig2 = px.histogram(train_mc, X_Axis)
fig2.update_layout(title=f'{X_Axis.title()}',
xaxis_title=f'{X_Axis.title()}',
autosize=False,width=600,height=600)
fig2.show()
one = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
two = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
three = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
four = interactive(hist_plot, {'manual': self.manual, 'manual_name':'Make_Chart'})
g = widgets.HBox([one, two])
h = widgets.HBox([three, four])
i = widgets.VBox([g,h])
display(i)
with out8:
header = widgets.HTML(value="<h1>Correlation Plots </h1>")
display(header)
import plotly.figure_factory as ff
corrs = train_mc.corr()
colorscale = ['Greys', 'Greens', 'Bluered', 'RdBu',
'Reds', 'Blues', 'Picnic', 'Rainbow', 'Portland', 'Jet',
'Hot', 'Blackbody', 'Earth', 'Electric', 'Viridis', 'Cividis']
@interact_manual
def plot_corrs(colorscale=colorscale):
figure = ff.create_annotated_heatmap(z = corrs.round(2).values,
x =list(corrs.columns),
y=list(corrs.index),
colorscale=colorscale,
annotation_text=corrs.round(2).values)
iplot(figure)
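# --- Hedged usage sketch (added for illustration; not part of the original pywedge source) ---
# Builds the chart widget for a toy frame; make_charts() needs a Jupyter/ipywidgets front end,
# so the call is left commented out. Assumes a pandas version that still accepts the
# positional axis argument used in __init__ above.
if __name__ == "__main__":
    import pandas as pd
    _toy = pd.DataFrame({"feature": [1.0, 2.0, 3.0, 4.0], "target": [0, 1, 0, 1]})
    _charts = Pywedge_Charts(_toy, c=None, y="target")
    # _charts.make_charts()  # renders the eight interactive chart tabs in a notebook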
class baseline_model():
'''
    Cleans the raw dataframe so it can be fed into ML models and runs various baseline models. The following data pre-processing steps will be carried out:
1) segregating numeric & categorical columns
2) missing values imputation for numeric & categorical columns
3) standardization
4) feature importance
5) SMOTE
6) baseline model
Inputs:
1) train = train dataframe
        2) test = stand-out test dataframe (without the target column)
        3) c = any redundant column to be removed (e.g. an ID column; currently only a single column can be removed, support for multiple columns is planned for a later version)
        4) y = target column name as a string
        5) type = Classification / Regression
Returns:
        1) Various classification/regression models & model performances
2) new_X (cleaned feature columns in dataframe)
3) new_y (cleaned target column in dataframe)
        4) new_test (cleaned stand-out test dataframe)
'''
def __init__(self, train, test, c, y, type="Classification"):
self.train = train
self.test = test
self.c = c
self.y = y
self.type = type
self.X = train.drop(self.y,1)
def classification_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScalar', '1'), ('RobustScalar', '2'), ('MinMaxScalar', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Text Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scalar completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Roubust Scalar completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Minmax Scalar completed')
elif std_scr.value == 'n':
print('> No standardization done')
if self.type=="Classification":
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
print('\nStarting classification_summary...')
print('TOP 10 FEATURE IMPORTANCE - USING ADABOOST CLASSIFIER')
from sklearn.ensemble import AdaBoostClassifier
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostClassifier().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import roc_auc_score
import warnings
warnings.filterwarnings('ignore')
from tqdm.notebook import trange, tqdm
classifiers = {
"Logistic" : LogisticRegression(n_jobs=-1),
"KNN(3)" : KNeighborsClassifier(3, n_jobs=-1),
"Decision Tree": DecisionTreeClassifier(max_depth=7),
"Random Forest": RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1),
"AdaBoost" : AdaBoostClassifier(),
"GB Classifier": GradientBoostingClassifier(),
"ExtraTree Cls": ExtraTreesClassifier(n_jobs=-1),
"Hist GB Cls" : HistGradientBoostingClassifier(),
"MLP Cls." : MLPClassifier(alpha=1),
"XGBoost" : xgb.XGBClassifier(max_depth=4, n_estimators=10, learning_rate=0.1, n_jobs=-1),
"CatBoost" : CatBoostClassifier(silent=True),
"Naive Bayes" : GaussianNB(),
"QDA" : QuadraticDiscriminantAnalysis(),
"Linear SVC" : LinearSVC(),
}
from time import time
k = 14
head = list(classifiers.items())[:k]
for name, classifier in tqdm(head):
start = time()
classifier.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = classifier.predict(self.X_test)
predict_time = time()-start
acc_score= (accuracy_score(self.y_test,predictions))
roc_score= (roc_auc_score(self.y_test,predictions))
f1_macro= (f1_score(self.y_test, predictions, average='macro'))
print("{:<15}| acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, acc_score, roc_score, f1_macro, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
options=['Logistic Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Classifier', 'AdaBoost', 'CatBoost', 'GB Classifier', 'ExtraTree Cls', 'Hist GB Cls' ],
value='Logistic Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, HistGradientBoostingClassifier
from catboost import CatBoostClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Logistic Regression':
classifier = LogisticRegression(max_iter=1000, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('> Prediction completed. \n> Use dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
classifier = KNeighborsClassifier(3, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
classifier = DecisionTreeClassifier(max_depth=7)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
classifier = RandomForestClassifier(max_depth=7, n_estimators=10, max_features=4, n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'MLP Classifier':
classifier = MLPClassifier(alpha=1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
classifier = AdaBoostClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
classifier = CatBoostClassifier(silent=True)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'GB Classifier':
classifier = GradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'ExtraTree Cls':
classifier = ExtraTreesClassifier(n_jobs=-1)
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
if base_model.value == 'Hist GB Cls':
classifier = HistGradientBoostingClassifier()
classifier.fit(self.X_train, self.y_train)
self.predictions_baseline = classifier.predict(self.new_test)
self.predict_proba_baseline = classifier.predict_proba(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline (for predictions) & blm.predict_proba_baseline (for predict_proba), where blm is pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
def Regression_summary(self):
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pywedge Baseline Models </h2>")
display(header)
out1 = widgets.Output()
out2 = widgets.Output()
tab = widgets.Tab(children = [out1, out2])
tab.set_title(0,'Baseline Models')
tab.set_title(1, 'Predict Baseline Model')
display(tab)
with out1:
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button
from IPython.display import display, Markdown, clear_output
header = widgets.HTML(value="<h2>Pre_processing </h2>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScalar', '1'), ('RobustScalar', '2'), ('MinMaxScalar', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.VBox([cat_info, std_scr, apply_smote])
pp_reg = widgets.VBox([cat_info, std_scr])
if self.type == 'Classification':
display(pp_class)
else:
display(pp_reg)
test_size = widgets.BoundedFloatText(
value=0.20,
min=0.05,
max=0.5,
step=0.05,
description='Text Size %',
disabled=False)
display(test_size)
button_1 = widgets.Button(description = 'Run Baseline models')
out = widgets.Output()
def on_button_clicked(_):
with out:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scalar completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Roubust Scalar completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Minmax Scalar completed')
elif std_scr.value == 'n':
print('> No standardization done')
print('Starting regression summary...')
print('TOP 10 FEATURE IMPORTANCE TABLE')
from sklearn.ensemble import AdaBoostRegressor
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
ab = AdaBoostRegressor().fit(self.new_X, self.new_y)
print(pd.Series(ab.feature_importances_, index=self.new_X.columns).sort_values(ascending=False).head(10))
from sklearn.model_selection import train_test_split
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=test_size.value, random_state=1)
from time import time
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.svm import LinearSVR
from sklearn.linear_model import Lasso, Ridge
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.tree import DecisionTreeRegressor
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor, ExtraTreesRegressor, HistGradientBoostingRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
from math import sqrt
from tqdm.notebook import trange, tqdm
import warnings
warnings.filterwarnings('ignore')
print('--------------------------LINEAR MODELS---------------------------------')
lin_regressors = {
'Linear Reg' : LinearRegression(n_jobs=-1),
'KNN' : KNeighborsRegressor(n_jobs=-1),
'LinearSVR' : LinearSVR(),
'Lasso' : Lasso(),
'Ridge' : Ridge(),
}
from time import time
k = 10
head = list(lin_regressors.items())[:k]
for name, lin_regressors in tqdm(head):
start = time()
lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_absolute_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
print('------------------------NON LINEAR MODELS----------------------------------')
print('---------------------THIS MIGHT TAKE A WHILE-------------------------------')
non_lin_regressors = {
#'SVR' : SVR(),
'Decision Tree' : DecisionTreeRegressor(max_depth=5),
'Random Forest' : RandomForestRegressor(max_depth=10, n_jobs=-1),
'GB Regressor' : GradientBoostingRegressor(n_estimators=200),
'CB Regressor' : CatBoostRegressor(silent=True),
'ADAB Regressor': AdaBoostRegressor(),
'MLP Regressor' : MLPRegressor(),
'XGB Regressor' : xgb.XGBRegressor(n_jobs=-1),
'Extra tree Reg': ExtraTreesRegressor(n_jobs=-1),
'Hist GB Reg' : HistGradientBoostingRegressor()
}
from time import time
k = 10
head = list(non_lin_regressors.items())[:k]
for name, non_lin_regressors in tqdm(head):
start = time()
non_lin_regressors.fit(self.X_train, self.y_train)
train_time = time() - start
start = time()
predictions = non_lin_regressors.predict(self.X_test)
predict_time = time()-start
exp_var = explained_variance_score(self.y_test, predictions)
mae = mean_absolute_error(self.y_test, predictions)
rmse = sqrt(mean_absolute_error(self.y_test, predictions))
r2 = r2_score(self.y_test, predictions)
print("{:<15}| exp_var = {:.3f} | mae = {:,.3f} | rmse = {:,.3f} | r2 = {:,.3f} | Train time = {:,.3f}s | Pred. time = {:,.3f}s".format(name, exp_var, mae, rmse, r2, train_time, predict_time))
button_1.on_click(on_button_clicked)
a = widgets.VBox([button_1, out])
display(a)
with out2:
base_model = widgets.Dropdown(
                options=['Linear Regression', 'KNN', 'Decision Tree', 'Random Forest', 'MLP Regressor', 'AdaBoost', 'Grad-Boost', 'CatBoost'],
value='Linear Regression',
description='Choose Base Model: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_model)
button_2 = widgets.Button(description = 'Predict Baseline models')
out2 = widgets.Output()
def on_pred_button_clicked(_):
with out2:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, AdaBoostRegressor
from catboost import CatBoostRegressor
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
clear_output()
print(base_model.value)
if base_model.value == 'Linear Regression':
regressor = LinearRegression()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'KNN':
regressor = KNeighborsRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Decision Tree':
regressor = DecisionTreeRegressor(max_depth=5)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Random Forest':
regressor = RandomForestRegressor(max_depth=10)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'MLP Regressor':
regressor = MLPRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'AdaBoost':
regressor = AdaBoostRegressor()
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'Grad-Boost':
regressor = GradientBoostingRegressor(n_estimators=200)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
if base_model.value == 'CatBoost':
regressor = CatBoostRegressor(silent=True)
regressor.fit(self.X_train, self.y_train)
self.predictions_baseline = regressor.predict(self.new_test)
print('Prediction completed. \nUse dot operator in below code cell to access predict, for eg., blm.predictions_baseline, where blm is pywedge_baseline_model class object')
button_2.on_click(on_pred_button_clicked)
b = widgets.VBox([button_2, out2])
display(b)
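# --- Hedged usage sketch (added for illustration; not part of the original pywedge source) ---
# Typical flow for the class above: a labelled train frame, a stand-out test frame without
# the target column, then the interactive summary. The widget UI needs a Jupyter front end,
# so the call is left commented out. Assumes a pandas version that still accepts the
# positional axis argument used in __init__ above.
if __name__ == "__main__":
    import pandas as pd
    _train_demo = pd.DataFrame({"f1": [1.0, 2.0, 3.0, 4.0], "target": [0, 1, 0, 1]})
    _test_demo = pd.DataFrame({"f1": [5.0, 6.0]})
    _blm = baseline_model(_train_demo, _test_demo, c=None, y="target", type="Classification")
    # _blm.classification_summary()  # after running, predictions are available on _blm.predictions_baseline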
class Pywedge_HP():
'''
    Creates an interactive widget-based hyperparameter selection tool for both Classification & Regression.
For Classification, following baseline estimators are covered in Gridsearch & Randomized search options
1) Logistic Regression
2) Decision Tree
3) Random Forest
4) KNN Classifier
For Regression, following baseline estimators are covered in Gridsearch & Randomized search options
1) Linear Regression
2) Decision Tree Regressor
3) Random Forest Regressor
4) KNN Regressor
Inputs:
1) train = train dataframe
        2) test = stand-out test dataframe (without the target column)
        3) c = any redundant column to be removed (e.g. an ID column; currently only a single column can be removed,
           support for multiple columns is planned for a later version)
4) y = target column name as a string
    Outputs:
1) Hyperparameter tuning results
2) Prediction on standout test dataset
'''
def __init__(self, train, test, c, y, tracking=False):
self.train = train
self.test = test
self.c = c
self.y = y
self.X = train.drop(self.y,1)
self.tracking = tracking
def HP_Tune_Classification(self):
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
import ipywidgets as widgets
from ipywidgets import HBox, VBox, Button, Label
from ipywidgets import interact_manual, interactive, interact
import logging
from IPython.display import display, Markdown, clear_output
import warnings
warnings.filterwarnings('ignore')
header_1 = widgets.HTML(value="<h2>Pywedge HP_Tune</h2>")
display(header_1)
out1 = widgets.Output()
out2 = widgets.Output()
out3 = widgets.Output()
tab = widgets.Tab(children = [out1, out2, out3])
tab.set_title(0, 'Input')
tab.set_title(1, 'Output')
tab.set_title(2, 'Helper Page')
display(tab)
with out1:
header = widgets.HTML(value="<h3>Base Estimator</h3>")
display(header)
import pandas as pd
cat_info = widgets.Dropdown(
options = [('cat_codes', '1'), ('get_dummies', '2')],
value = '1',
description = 'Select categorical conversion',
style = {'description_width': 'initial'},
disabled=False)
std_scr = widgets.Dropdown(
options = [('StandardScalar', '1'), ('RobustScalar', '2'), ('MinMaxScalar', '3'), ('No Standardization', 'n')],
value = 'n',
description = 'Select Standardization methods',
style = {'description_width': 'initial'},
disabled=False)
apply_smote = widgets.Dropdown(
options = [('Yes', 'y'), ('No', 'n')],
value = 'y',
description = 'Do you want to apply SMOTE?',
style = {'description_width': 'initial'},
disabled=False)
pp_class = widgets.HBox([cat_info, std_scr, apply_smote])
header_2 = widgets.HTML(value="<h3>Pre_processing </h3>")
base_estimator = widgets.Dropdown(
options=['Logistic Regression', 'Decision Tree', 'Random Forest','AdaBoost', 'ExtraTree Classifier', 'KNN Classifier'],
value='Logistic Regression',
description='Choose Base Estimator: ',
style = {'description_width': 'initial'},
disabled=False)
display(base_estimator)
button = widgets.Button(description='Select Base Estimator')
out = widgets.Output()
# Logistic Regression Hyperparameters _Start
penalty_L = widgets.SelectMultiple(
options = ['l1', 'l2', 'elasticnet', 'none'],
value = ['none'],
rows = 4,
description = 'Penalty',
disabled = False)
dual_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Dual',
disabled = False)
tol_L = widgets.Text(
value='0.0001',
placeholder='enter any float value',
description='Tolerence (tol)',
style = {'description_width': 'initial'},
disabled=False)
g = widgets.HBox([penalty_L, dual_L, tol_L])
C_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='C',
disabled=False)
fit_intercept_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Fit_intercept',
disabled = False)
intercept_scaling_L = widgets.Text(
value='1.0',
placeholder='enter any float value',
description='Intercept_scaling',
style = {'description_width': 'initial'},
disabled=False)
h = widgets.HBox([C_L, fit_intercept_L, intercept_scaling_L])
class_weight_L = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['None'],
rows = 2,
description = 'Class_weight',
disabled = False)
random_state_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
solver_L = widgets.SelectMultiple(
options = ['newton-cg', 'lbfgs', 'sag', 'saga'],
value = ['lbfgs'],
rows = 4,
description = 'Solver',
disabled = False)
i= widgets.HBox([class_weight_L, random_state_L, solver_L])
max_iter_L = widgets.Text(
value='100',
placeholder='enter any integer value',
description='Max_Iterations',
style = {'description_width': 'initial'},
disabled=False)
verbose_L = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_L = widgets.SelectMultiple(
options = [True, False],
value = [False],
rows = 2,
description = 'Warm_State',
disabled = False)
j= widgets.HBox([max_iter_L, verbose_L, warm_state_L])
L1_Ratio_L = widgets.Text(
value='None',
placeholder='enter any integer value',
description='L1_Ratio',
style = {'description_width': 'initial'},
disabled=False)
k = widgets.HBox([L1_Ratio_L])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_L = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_L = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_L = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_L, cv_L, scoring_L])
n_iter_L = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_L = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_L, n_iter_L, n_iter_text])
null = widgets.HTML('<br></br>')
button_2 = widgets.Button(description='Submit HP_Tune')
out_res = widgets.Output()
def on_out_res_clicked(_):
with out_res:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Standard Scalar completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Roubust Scalar completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test= pd.DataFrame(scalar.fit_transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> standardization using Minmax Scalar completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', 'FutureWarning')
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
param_grid = {'penalty': list(penalty_L.value),
'dual': list(dual_L.value),
'tol': [float(item) for item in tol_L.value.split(',')],
'C' : [float(item) for item in C_L.value.split(',')],
'fit_intercept' : list(fit_intercept_L.value),
'intercept_scaling' : [float(item) for item in intercept_scaling_L.value.split(',')],
'class_weight' : list(class_weight_L.value),
'random_state' : [int(item) for item in random_state_L.value.split(',')],
'solver' : list(solver_L.value),
'max_iter' : [int(item) for item in max_iter_L.value.split(',')],
# 'multi_class' : list(multiclass.value),
'verbose' : [int(item) for item in verbose_L.value.split(',')],
# 'n_jobs' : [float(item) for item in n_jobs.value.split(',')]
}
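# With MLflow tracking enabled, autolog the search run and additionally log headline metrics computed on a 20% holdout split.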
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
n_jobs = int(n_jobs_L.value),
scoring = scoring_L.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore")
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = LogisticRegression()
if search_param_L.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_L.value),
scoring = scoring_L.value)
if search_param_L.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_L.value),
n_iter = int(n_iter_L.value),
scoring = scoring_L.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
from ipywidgets import interact, interactive
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
Pred = widgets.HTML(value='<h3><em>Predictions on standalone test data</em></h3>')
print('\033[1m'+'\033[4m'+'Predictions on standalone test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
b = widgets.VBox([button_2, out_res])
h1 = widgets.HTML('<h3>Select Logistic Regression Hyperparameters</h3>')
aa = widgets.VBox([header_2, pp_class, h1, g,h,i,j,k, h5, l, m, null, b])
# Logistic Regression Hyperparameter - Ends
# Decision Tree Hyperparameter - Starts
criterion_D = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
description = 'Criterion',
rows = 2,
disabled = False)
splitter_D = widgets.SelectMultiple(
options = ['best', 'random'],
value = ['best'],
rows = 2,
description = 'Splitter',
disabled = False)
max_depth_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_D = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_D = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
disabled=False)
max_leaf_nodes_D = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_D = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
class_weight_D = widgets.SelectMultiple(
options = ['balanced', 'None'],
value = ['balanced'],
rows = 2,
description = 'Class_weight',
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_D = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
first_row = widgets.HBox([criterion_D, splitter_D, max_features_D])
second_row = widgets.HBox([min_samples_split_D, min_weight_fraction_D, max_depth_D])
third_row = widgets.HBox([random_state_D, max_leaf_nodes_D, min_impurity_decrease_D])
fourth_row = widgets.HBox([ccp_alpha_D, class_weight_D, min_samples_leaf_D])
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_D = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_D = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_D = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_D, cv_D, scoring_D])
n_iter_D = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_D = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_D, n_iter_D, n_iter_text])
button_3 = widgets.Button(description='Submit HP_Tune')
out_res_DT = widgets.Output()
def on_out_res_clicked_DT(_):
with out_res_DT:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using StandardScaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using RobustScaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using MinMaxScaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
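# Build the Decision Tree search space from the widget selections.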
# print(criterion_D.value)
param_grid = {'criterion': list(criterion_D.value),
'splitter': list(splitter_D.value),
'max_depth': [int(item) for item in max_depth_D.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_D.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_D.value.split(',')],
# 'min_weight_fraction' : [float(item) for item in min_weight_fraction.value.split(',')],
'max_features' : list(max_features_D.value),
'random_state' : [int(item) for item in random_state_D.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_D.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_D.value.split(',')],
'ccp_alpha' : [float(item) for item in ccp_alpha_D.value.split(',')],
'class_weight' : list(class_weight_D.value)
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
warnings.filterwarnings("ignore")
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
with mlflow.start_run() as run:
warnings.filterwarnings("ignore", category=Warning)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = DecisionTreeClassifier()
if search_param_D.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_D.value),
cv = int(cv_D.value),
scoring = scoring_D.value)
if search_param_D.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_D.value),
n_jobs = int(n_jobs_D.value),
n_iter = int(n_iter_D.value),
scoring = scoring_D.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on standalone test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_2.on_click(on_out_res_clicked)
button_3.on_click(on_out_res_clicked_DT)
b = widgets.VBox([button_3, out_res_DT])
h1 = widgets.HTML('<h3>Select Decision Tree Hyperparameters</h3>')
frame = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, h5, l, m, b])
# Decision Tree Hyperparameter Ends
# Random Forest Hyperparameter Starts
n_estimators_R = widgets.Text(
value='100',
placeholder='enter any integer value',
description='n_estimators',
disabled=False)
criterion_R = widgets.SelectMultiple(
options = ['gini', 'entropy'],
value = ['gini'],
rows = 2,
description = 'Criterion',
disabled = False)
max_depth_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='Max_Depth',
disabled=False)
min_samples_split_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='min_samples_split',
style = {'description_width': 'initial'},
disabled=False)
min_samples_leaf_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='min_samples_leaf',
style = {'description_width': 'initial'},
disabled=False)
min_weight_fraction_leaf_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='min_weight_fraction',
style = {'description_width': 'initial'},
disabled=False)
max_features_R = widgets.SelectMultiple(
options = ['auto', 'sqrt', 'log2'],
value = ['auto'],
description = 'Max_Features',
style = {'description_width': 'initial'},
rows = 3,
disabled = False)
random_state_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Random_state',
style = {'description_width': 'initial'},
disabled=False)
max_leaf_nodes_R = widgets.Text(
value='2',
placeholder='enter any integer value',
description='Max_leaf_nodes',
style = {'description_width': 'initial'},
disabled=False)
min_impurity_decrease_R = widgets.Text(
value='0.0',
placeholder='enter any float value',
description='Min_impurity_decrease',
style = {'description_width': 'initial'},
disabled=False)
bootstrap_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Bootstrap',
rows = 2,
disabled = False)
oob_score_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'oob_score',
rows = 2,
disabled = False)
verbose_R = widgets.Text(
value='0',
placeholder='enter any integer value',
description='Verbose',
disabled=False)
warm_state_R = widgets.SelectMultiple(
options = [True, False],
value = [False],
description = 'Warm_State',
style = {'description_width': 'initial'},
rows = 2,
disabled = False)
class_weight_R = widgets.SelectMultiple(
options = ['balanced', 'balanced_subsample', 'None'],
value = ['balanced'],
description = 'Class_weight',
rows = 3,
style = {'description_width': 'initial'},
disabled = False)
ccp_alpha_R = widgets.Text(
value='0.0',
placeholder='enter any non-negative float value',
description='ccp_alpha',
disabled=False)
max_samples_R = widgets.Text(
value='2',
placeholder='enter any float value',
description='max_samples',
style = {'description_width': 'initial'},
disabled=False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_R = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_R = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_R = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_R, cv_R, scoring_R])
n_jobs_R = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_R = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_R, n_iter_R, n_iter_text])
first_row = widgets.HBox([n_estimators_R, criterion_R, max_depth_R])
second_row = widgets.HBox([min_samples_split_R, min_samples_leaf_R, min_weight_fraction_leaf_R])
third_row = widgets.HBox([max_features_R, max_leaf_nodes_R, min_impurity_decrease_R])
fourth_row = widgets.HBox([max_samples_R, bootstrap_R, oob_score_R])
fifth_row = widgets.HBox([warm_state_R, random_state_R, verbose_R])
sixth_row = widgets.HBox([class_weight_R, ccp_alpha_R])
button_4 = widgets.Button(description='Submit RF GridSearchCV')
out_res_RF = widgets.Output()
def on_out_res_clicked_RF(_):
with out_res_RF:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
if std_scr.value == '1':
from sklearn.preprocessing import StandardScaler
scalar = StandardScaler()
self.new_X = pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using StandardScaler completed')
elif std_scr.value == '2':
from sklearn.preprocessing import RobustScaler
scalar = RobustScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using RobustScaler completed')
elif std_scr.value == '3':
from sklearn.preprocessing import MinMaxScaler
scalar = MinMaxScaler()
self.new_X= pd.DataFrame(scalar.fit_transform(self.new_X), columns=self.new_X.columns, index=self.new_X.index)
self.new_test = pd.DataFrame(scalar.transform(self.new_test), columns=self.new_test.columns, index=self.new_test.index)
print('> Standardization using MinMaxScaler completed')
elif std_scr.value == 'n':
print('> No standardization done')
if apply_smote.value == 'y':
from imblearn.over_sampling import SMOTE
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.exceptions import DataConversionWarning
warnings.filterwarnings(action='ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
sm = SMOTE(random_state=42, n_jobs=-1)
new_X_cols = self.new_X.columns
new_y_cols = self.new_y.columns
self.new_X, self.new_y= sm.fit_resample(self.new_X, self.new_y)
self.new_X = pd.DataFrame(self.new_X, columns=new_X_cols)
self.new_y= pd.DataFrame(self.new_y, columns=new_y_cols)
print('> Oversampling using SMOTE completed')
else:
print('> No oversampling done')
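# Build the Random Forest search space from the widget selections.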
param_grid = {'n_estimators' : [int(item) for item in n_estimators_R.value.split(',')],
'criterion': list(criterion_R.value),
'max_depth': [int(item) for item in max_depth_R.value.split(',')],
'min_samples_split' : [int(item) for item in min_samples_split_R.value.split(',')],
'min_samples_leaf' : [int(item) for item in min_samples_leaf_R.value.split(',')],
'min_weight_fraction_leaf' : [float(item) for item in min_weight_fraction_leaf_R.value.split(',')],
'max_features' : list(max_features_R.value),
'random_state' : [int(item) for item in random_state_R.value.split(',')],
'max_leaf_nodes' : [int(item) for item in max_leaf_nodes_R.value.split(',')],
'min_impurity_decrease' : [float(item) for item in min_impurity_decrease_R.value.split(',')],
'bootstrap' : list(bootstrap_R.value),
'oob_score' : list(oob_score_R.value),
'verbose' : [int(item) for item in verbose_R.value.split(',')],
'class_weight' : list(class_weight_R.value),
'ccp_alpha' : [float(item) for item in ccp_alpha_R.value.split(',')],
'max_samples' : [int(item) for item in max_samples_R.value.split(',')]
}
if self.tracking == True:
import mlflow
from mlflow import log_metric, log_param, log_artifacts
mlflow.sklearn.autolog()
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
with mlflow.start_run() as run:
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
mlflow.log_param("acc_score", acc_score)
mlflow.log_param("roc_score", roc_score)
mlflow.log_param("f1_macro", f1_macro)
mlflow.log_param("Best Estimator", self.classifier.best_estimator_)
if self.tracking == False:
estimator = RandomForestClassifier()
if search_param_R.value == 'GridSearch CV':
print('> GridSearch CV in progress...')
grid_lr = GridSearchCV(estimator=estimator,
param_grid = param_grid,
n_jobs = int(n_jobs_R.value),
cv = int(cv_R.value),
scoring = scoring_R.value)
if search_param_R.value == 'Random Search CV':
print('> RandomizedSearch CV in progress...')
grid_lr = RandomizedSearchCV(estimator=estimator,
param_distributions = param_grid,
cv = int(cv_R.value),
n_jobs = int(n_jobs_R.value),
n_iter = int(n_iter_R.value),
scoring = scoring_R.value)
self.classifier = grid_lr.fit(self.new_X.values, self.new_y.values)
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score
X_train, X_test, y_train, y_test = train_test_split(
self.new_X.values, self.new_y.values, test_size=0.2, random_state=1)
predictions = self.classifier.predict(X_test)
acc_score= (accuracy_score(y_test,predictions))
roc_score= (roc_auc_score(y_test,predictions))
f1_macro= (f1_score(y_test, predictions, average='macro'))
with out2:
clear_output()
print('\033[1m'+'\033[4m'+'Get_Params \n***************************************'+'\033[0m')
print(self.classifier.get_params)
print('\033[1m'+'\033[4m'+'Best_Estimator \n***********************************'+'\033[0m')
print(self.classifier.best_estimator_)
print('\033[1m'+'\033[4m'+'Metrics on Train data \n******************************'+'\033[0m')
print("acc_score = {:.3f} | roc_score = {:,.3f} | f1_score(macro) = {:,.3f}".format(acc_score, roc_score, f1_macro))
print('\033[1m'+'\033[4m'+'Predictions on standalone test data \n******************************'+'\033[0m')
self.predict_HP = self.classifier.predict(self.new_test)
print('Prediction completed. \nUse the dot operator in the code cell below to access the predictions, e.g., pph.predict_HP, where pph is the pywedge_HP class object')
msg = widgets.HTML('<h4>Please switch to output tab for results...</h4>')
msg_1 = widgets.HTML('<h4>Please run mlflow ui in the command prompt to monitor HP tuning results</h4>')
display(msg)
if self.tracking==True:
display(msg_1)
button_4.on_click(on_out_res_clicked_RF)
b = widgets.VBox([button_4, out_res_RF])
h1 = widgets.HTML('<h3>Select Random Forest Hyperparameters</h3>')
frame_RF = widgets.VBox([header_2, pp_class, h1, first_row, second_row, third_row, fourth_row, fifth_row, sixth_row, h5, l, m, b])
# Random Forest Hyperparameter ends
# KNN Classifier Hyperparameter Starts
n_neighbors_k = widgets.Text(
value='5',
placeholder='enter any integer value',
description='n_neighbors',
disabled=False)
weights_k = widgets.SelectMultiple(
options = ['uniform', 'distance'],
value = ['uniform'],
rows = 2,
description = 'Weights',
disabled = False)
algorithm_k = widgets.SelectMultiple(
options = ['auto', 'ball_tree', 'kd_tree', 'brute'],
value = ['auto'],
rows = 4,
description = 'Algorithm',
disabled = False)
leaf_size_k = widgets.Text(
value='30',
placeholder='enter any integer value',
description='Leaf_Size',
disabled=False)
p_k = widgets.Text(
value='2',
placeholder='enter any integer value',
description='p (Power param)',
disabled=False)
metric_k = widgets.SelectMultiple(
options = ['euclidean', 'manhattan', 'chebyshev', 'minkowski'],
value = ['minkowski'],
rows = 4,
description = 'Metric',
disabled = False)
h5 = widgets.HTML('<h4>Select Grid/Random search Hyperparameters</h4>')
search_param_K = widgets.Dropdown(
options=['GridSearch CV', 'Random Search CV'],
value='GridSearch CV',
description='Choose Search Option: ',
style = {'description_width': 'initial'},
disabled=False)
cv_K = widgets.Text(
value='5',
placeholder='enter any integer value',
description='CV',
style = {'description_width': 'initial'},
disabled=False)
scoring_K = widgets.Dropdown(
options = ['accuracy', 'f1', 'roc_auc', 'balanced_accuracy'],
value = 'accuracy',
rows = 4,
description = 'Scoring',
disabled = False)
l = widgets.HBox([search_param_K, cv_K, scoring_K])
n_iter_K = widgets.Text(
value='10',
placeholder='enter any integer value',
description='n_iter',
style = {'description_width': 'initial'},
disabled=False)
n_jobs_K = widgets.Text(
value='1',
placeholder='enter any integer value',
description='n_jobs',
style = {'description_width': 'initial'},
disabled=False)
n_iter_text = widgets.HTML(value='<p><em>For Random Search</em></p>')
m = widgets.HBox([n_jobs_K, n_iter_K, n_iter_text])
first_row = widgets.HBox([n_neighbors_k, weights_k, algorithm_k])
second_row = widgets.HBox([leaf_size_k, p_k, metric_k])
button_5 = widgets.Button(description='Submit KNN GridSearchCV')
out_res_K = widgets.Output()
def on_out_res_clicked_K(_):
with out_res_K:
clear_output()
import pandas as pd
self.new_X = self.X.copy(deep=True)
self.new_y = self.y
self.new_test = self.test.copy(deep=True)
categorical_cols = self.new_X.select_dtypes('object').columns.to_list()
for col in categorical_cols:
self.new_X[col].fillna(self.new_X[col].mode()[0], inplace=True)
numeric_cols = self.new_X.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_X[col].fillna(self.new_X[col].mean(), inplace=True)
test_categorical_cols = self.new_test.select_dtypes('object').columns.to_list()
for col in test_categorical_cols:
self.new_test[col].fillna(self.new_test[col].mode()[0], inplace=True)
numeric_cols = self.new_test.select_dtypes(['float64', 'int64']).columns.to_list()
for col in numeric_cols:
self.new_test[col].fillna(self.new_test[col].mean(), inplace=True)
if cat_info.value == '1':
for col in categorical_cols:
self.new_X[col] = self.new_X[col].astype('category')
self.new_X[col] = self.new_X[col].cat.codes
self.new_test[col] = self.new_test[col].astype('category')
self.new_test[col] = self.new_test[col].cat.codes
print('> Categorical columns converted using Catcodes')
if cat_info.value == '2':
self.new_X = pd.get_dummies(self.new_X,drop_first=True)
self.new_test = pd.get_dummies(self.new_test,drop_first=True)
print('> Categorical columns converted using Get_Dummies')
self.new_y = pd.DataFrame(self.train[[self.y]])
self.new_y = pd.get_dummies(self.new_y,drop_first=True)
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
col_index=['c1', 'c2', 'c3']
df=pd.DataFrame([[11,12,13],[21,22,23],[31,32,33]], columns=col_index)
ef=pd.DataFrame([[1100,1200,1300],[2100,2200,2300],[3100,3200,3300]], columns=col_index)
concat=pd.concat([df, ef], axis=1)
concat=concat.reset_index()
print(concat)
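# With axis=1 and a shared default index, the frames sit side by side: three rows, six columns with duplicated labels c1-c3.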
df=pd.DataFrame([[11,12,13],[21,22,23],[31,32,33]],
columns=col_index,
index=[1, 2, 3])
ef=pd.DataFrame([[1100,1200,1300],[2100,2200,2300],[3100,3200,3300]],
columns=col_index,
index=[2, 3, 4])
concat=pd.concat([df, ef])
concat=concat.reset_index()
print(concat)
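# Stacking along axis=0 keeps the original row labels (2 and 3 appear twice); reset_index() returns a copy renumbered 0-5.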
datafile='earthquakes.csv'
#useful if you want the directory this script is in
if '__file__' in dir():
path, _=os.path.split(__file__)
else: path=os.getcwd()
filename=os.path.join(path, datafile)
df=pd.read_csv(filename, parse_dates=[0])
from mrjob.job import MRJob
from mrjob.protocol import PickleProtocol
# # mongo clients libs
from pymongo import MongoClient, ASCENDING, DESCENDING
from bson.objectid import ObjectId
# # Generic imports
import glob
import pandas as pd
import numpy as np
from scipy.stats import percentileofscore
from json import load
from datetime import datetime, timedelta
from time import mktime
from dateutil.relativedelta import relativedelta
import ast
import re
import bee_data_cleaning as dc
from bee_dataframes import create_dataframes
from edinet_models.edinet_models import baseline_calc_pyemis_old, baseline_calc_pyemis_new, monthly_calc
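# MapReduce job: the mapper tags each raw reading with its modelling unit(s); the reducer assembles a per-unit dataframe, flags hourly data availability and attaches weather-station temperatures.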
class MRJob_align(MRJob):
INTERNAL_PROTOCOL = PickleProtocol
def mapper_init(self):
fn = glob.glob('*.json')
self.config = load(open(fn[0]))
self.mongo = MongoClient(self.config['mongodb']['host'], self.config['mongodb']['port'])
self.mongo[self.config['mongodb']['db']].authenticate(
self.config['mongodb']['username'],
self.config['mongodb']['password']
)
self.devices = self.config['devices']
self.task_id = self.config['task_id']
def reducer_init(self):
# recover json configuration uploaded with script
fn = glob.glob('*.json')
self.config = load(open(fn[0]))
self.mongo = MongoClient(self.config['mongodb']['host'], self.config['mongodb']['port'])
self.mongo[self.config['mongodb']['db']].authenticate(
self.config['mongodb']['username'],
self.config['mongodb']['password']
)
self.company = self.config['company']
self.devices = self.config['devices']
self.stations = self.config['stations']
self.task_id = self.config['task_id']
def mapper(self, _, doc): #we don't have value -> input protocol pickleValue which means no key is read
# emits modelling_units as key
# emits deviceId, consumption, ts
try:
ret = doc.split('\t')
modelling_units = self.devices[str(ret[0])]
d = {
'deviceid': ret[0],
'date': datetime.fromtimestamp(float(ret[1])),
'energyType': ret[4]
}
except Exception as e:
pass
try:
d['value'] = float(ret[2])
except:
d['value'] = None
try:
d['accumulated'] = float(ret[3])
except:
d['accumulated'] = None
for modelling_unit in modelling_units:
yield modelling_unit, d
def reducer(self, key, values):
# obtain the needed info from the key
modelling_unit, multipliers, model = key.split('~')
multipliers = ast.literal_eval(multipliers) #string to dict
multiplier = {}
for i in multipliers:
multiplier[i['deviceId']] = i['multiplier']
# create dataframe from values list
v = []
for i in values:
v.append(i)
df = pd.DataFrame.from_records(v, index='date', columns=['value','accumulated','date','deviceid','energyType'])
df = df.sort_index()
# meter replacement
mongo_modellingUnits = self.mongo[self.config['mongodb']['db']][
self.config['mongodb']['modelling_units_collection']]
df = create_dataframes.meter_replacement(modelling_unit, self.company, df, mongo_modellingUnits)
# mongo collections used for the hourly-data flag (building and reporting units)
mongo_building = self.mongo[self.config['mongodb']['db']][
self.config['mongodb']['building_collection']]
mongo_reporting = self.mongo[self.config['mongodb']['db']][
self.config['mongodb']['reporting_collection']]
# get station
station = self.stations[str(df.deviceid[0])]
# apply the multiplier over each deviceId value and sum all the values
grouped = df.groupby('deviceid')
# Detect whether the building has hourly data.
try:
# for now this is only done for gas and electricity.
modelling_unit_item = mongo_modellingUnits.find_one(
{"modellingUnitId": modelling_unit, "companyId": self.company})
valid_type = False
# greame buildings
if "energyType" in modelling_unit_item:
if modelling_unit_item["energyType"] in ["electricityConsumption", "gasConsumption", "heatConsumption"]:
valid_type = True
# eloi buildings
if "label" in modelling_unit_item:
if modelling_unit_item["label"] in ["electricityConsumption", "gasConsumption"]:
valid_type = True
except Exception as e:
print(e)
# if the type is valid, we need to obtain the buildingId.
buildingId = None
if valid_type:
# mirem si el podem aconseguir directament (edificis eloi)
building_item = mongo_building.find_one({"modellingUnits": modelling_unit, "companyId": self.company})
if building_item and "buildingId" in building_item:
buildingId = building_item['buildingId']
else:
# Otherwise, look it up in the reporting unit (greame buildings)
reporting_item = mongo_reporting.find_one(
{"modelling_Units": modelling_unit, "companyId": self.company})
if reporting_item and "buildingId" in reporting_item:
buildingId = reporting_item['buildingId']
# if buildingId is None, there is nothing to do.
if buildingId:
# For each device and energy type, check whether there is hourly data or not,
# but only for the last 12 weeks.
# Since data may come from several tables, take the max of the df to get the last timestamp
# and subtract 12 weeks, then filter to keep only data from that point onwards.
last_date = max(df.index)
weeks12 = timedelta(days=7 * 12)
starting_date = last_date - weeks12
hourly_data_device = False
for name, group in grouped:
if hourly_data_device:
break
energy_type_grouped = group.groupby('energyType')
for energy_type, energy_type_group in energy_type_grouped:
group_new = energy_type_group.reset_index().drop_duplicates(subset='date',
keep='last').set_index(
'date').sort_index()
group_new = group_new[starting_date:]
freq = create_dataframes.calculate_frequency(group_new)
# if there is no freq (only one timestamp or none), skip this device and move on to the next
if not freq:
continue
day_delta = timedelta(hours=1)
if freq <= day_delta:
# If this modelling unit has hourly data (a sampling frequency of one hour or less),
# flag it and stop looping over its devices
hourly_data_device = True
break
# Store the result for this device and energy type in mongo
mongo_building.update(
{"buildingId": buildingId, "companyId": self.company},
{"$set": {"hourlyDataDevice.{}".format(modelling_unit): hourly_data_device}},
upsert=False,
)
# Finally, fetch the updated building resource and check whether any device has hourly data.
building_doc = mongo_building.find_one({"buildingId": buildingId, "companyId": self.company})
if building_doc['hourlyDataDevice']:
mongo_building.update(
{"buildingId": buildingId, "companyId": self.company},
{"$set": {"hourlyData": any(building_doc['hourlyDataDevice'].values())}},
upsert=False,
)
mongo = self.mongo[self.config['mongodb']['db']][self.config['mongodb']['collection']]
mongo_weather = self.mongo[self.config['mongodb']['db']][
self.config['mongodb']['weather_collection']]
# get station temperatures list
station_doc = mongo_weather.find_one({'stationId': station},{'values': True, 'timestamps': True, })
# if not station, finish.
if not station_doc:
return
# create temperature dataframe
temps = []
for ts, t in zip(station_doc['timestamps'], station_doc['values']):
val = {'date': ts, 'temperature': t}
temps.append(val)
tdf = pd.DataFrame.from_records(temps, index='date', columns=['temperature', 'date'])
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
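# Split the csv into equal row ranges, read them concurrently with a thread pool, then concat the chunks into one frame.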
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
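# the first Kempten row opens a quoted field that is never closed, so the
# block should fail to parse; appending the missing quote makes it valid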
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
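# f(i, v) builds row i with the i-th NA token in column i and empty fields
# everywhere else, so the parsed nv x nv frame should be all-NaN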
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows=4 drops the first four physical lines (the three comments and
# 'X,Y,Z'); header=1 then takes 'A,B,C', the second remaining line, as the header
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat artificial, since under PY3 the parser never actually sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
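# chunk1 is sized just below 256KB, presumably so the parser's next internal
# buffered read starts at the newline/whitespace in chunk2 (GH 9735)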
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return read_table(*args, **kwds)
def test_sniff_delimiter(self):
text = """index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data = self.read_csv(StringIO(text), index_col=0, sep=None)
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
data2 = self.read_csv(StringIO(text), index_col=0, delimiter='|')
tm.assert_frame_equal(data, data2)
text = """ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
"""
data3 = self.read_csv(StringIO(text), index_col=0,
sep=None, skiprows=2)
tm.assert_frame_equal(data, data3)
text = u("""ignore this
ignore this too
index|A|B|C
foo|1|2|3
bar|4|5|6
baz|7|8|9
""").encode('utf-8')
s = BytesIO(text)
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
data4 = self.read_csv(s, index_col=0, sep=None, skiprows=2,
encoding='utf-8')
tm.assert_frame_equal(data, data4)
def test_regex_separator(self):
data = """ A B C D
a 1 2 3 4
b 1 2 3 4
c 1 2 3 4
"""
df = self.read_table(StringIO(data), sep='\s+')
expected = self.read_csv(StringIO(re.sub('[ ]+', ',', data)),
index_col=0)
self.assertIsNone(expected.index.name)
tm.assert_frame_equal(df, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = [[1, 2334., 5],
[10, 13, 10]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = [[1, 2., 4],
[5, np.nan, 10.]]
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = self.read_csv(StringIO(data_expected), header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From <NAME>: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assertRaisesRegexp(ValueError, "must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assertRaisesRegexp(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_fwf_colspecs_is_list_or_tuple(self):
with tm.assertRaisesRegexp(TypeError,
'column specifications must be a list or '
'tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(self.data1),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
with tm.assertRaisesRegexp(TypeError,
'Each column specification must be.+'):
read_fwf(StringIO(self.data1), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
self.assertTrue(len(res))
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn", "dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest(
"Bytes-related test - only needs to work on Python 3")
result = pd.read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = pd.DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
data = BytesIO("שלום::1234\n562::123".encode('cp1255'))
result = pd.read_table(data, sep="::", engine='python',
encoding='cp1255')
expected = pd.DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_verbose_import(self):
text = """a,b,c,d
one,1,2,3
one,1,2,3
,1,2,3
one,1,2,3
,1,2,3
,1,2,3
one,1,2,3
two,1,2,3"""
buf = StringIO()
sys.stdout = buf
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True)
self.assertEqual(
buf.getvalue(), 'Filled 3 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
buf = StringIO()
sys.stdout = buf
text = """a,b,c,d
one,1,2,3
two,1,2,3
three,1,2,3
four,1,2,3
five,1,2,3
,1,2,3
seven,1,2,3
eight,1,2,3"""
try:
# it works!
df = self.read_csv(StringIO(text), verbose=True, index_col=0)
self.assertEqual(
buf.getvalue(), 'Filled 1 NA values in column a\n')
finally:
sys.stdout = sys.__stdout__
def test_float_precision_specified(self):
# Should raise an error if float_precision (C parser option) is
# specified
with tm.assertRaisesRegexp(ValueError, "The 'float_precision' option "
"is not supported with the 'python' engine"):
self.read_csv(StringIO('a,b,c\n1,2,3'), float_precision='high')
def test_iteration_open_handle(self):
if PY3:
raise nose.SkipTest(
"won't work in Python 3 {0}".format(sys.version_info))
with tm.ensure_clean() as path:
with open(path, 'wb') as f:
f.write('AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG')
with open(path, 'rb') as f:
for line in f:
if 'CCC' in line:
break
try:
read_table(f, squeeze=True, header=None, engine='c')
except Exception:
pass
else:
raise ValueError('this should not happen')
result = read_table(f, squeeze=True, header=None,
engine='python')
expected = Series(['DDD', 'EEE', 'FFF', 'GGG'], name=0)
tm.assert_series_equal(result, expected)
def test_iterator(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_single_line(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_malformed(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_skip_footer(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with the C parser is fixed
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = self.read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_decompression_regex_sep(self):
# GH 6607
# This is a copy which should eventually be moved to ParserTests
# when the issue with the C parser is fixed
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the issue with multi-level index is fixed in the C parser.
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
# GH 6893
data = ' A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9'
expected = DataFrame.from_records([(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],
columns=list('abcABC'), index=list('abc'))
actual = self.read_table(StringIO(data), sep='\s+')
tm.assert_frame_equal(actual, expected)
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
class TestFwfColspaceSniffing(tm.TestCase):
def test_full_file(self):
# File with all values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = '''index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34'''
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 <NAME> 9315.45 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 <NAME> 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65 5000.00 2/5/2007
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = '''
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00 8/6/2003
868 5/25/1985
761 <NAME>-Smith 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = '''
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 <NAME> 90.00 1000.00
761 <NAME> 49654.87 100000.00 12/5/2006
317 <NAME> 789.65
'''.strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r'''
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~<NAME>
33+++122.33\\\bar.........<NAME>
++44~~~~12.01 baz~~<NAME>
~~55 11+++foo++++<NAME>-Smith
..66++++++.03~~~bar <NAME>
'''.strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
raise nose.SkipTest(
'Bytes-related test - only needs to work on Python 3')
test = '''
שלום שלום
ום שלל
של ום
'''.strip('\r\n')
expected = pd.read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)], header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(BytesIO(test.encode('utf8')),
header=None, encoding='utf8'))
class CParserTests(ParserTests):
""" base class for CParser Testsing """
def test_buffer_overflow(self):
# GH9205
# test certain malformed input files that cause buffer overflows in
# tokenizer.c
malfw = "1\r1\r1\r 1\r 1\r" # buffer overflow in words pointer
malfs = "1\r1\r1\r 1\r 1\r11\r" # buffer overflow in stream pointer
malfl = "1\r1\r1\r 1\r 1\r11\r1\r" # buffer overflow in lines pointer
for malf in (malfw, malfs, malfl):
try:
df = self.read_table(StringIO(malf))
except Exception as cperr:
self.assertIn(
'Buffer overflow caught - possible malformed input file.', str(cperr))
def test_buffer_rd_bytes(self):
# GH 12098
# src->buffer can be freed twice leading to a segfault if a corrupt
# gzip file is read with read_csv and the buffer is filled more than
# once before gzip throws an exception
data = '\x1F\x8B\x08\x00\x00\x00\x00\x00\x00\x03\xED\xC3\x41\x09' \
'\x00\x00\x08\x00\xB1\xB7\xB6\xBA\xFE\xA5\xCC\x21\x6C\xB0' \
'\xA6\x4D' + '\x55' * 267 + \
'\x7D\xF7\x00\x91\xE0\x47\x97\x14\x38\x04\x00' \
'\x1f\x8b\x08\x00VT\x97V\x00\x03\xed]\xefO'
for i in range(100):
try:
_ = self.read_csv(StringIO(data),
compression='gzip',
delim_whitespace=True)
except Exception as e:
pass
class TestCParserHighMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = False
return read_table(*args, **kwds)
def test_compact_ints(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_parse_dates_empty_string(self):
# #2263
s = StringIO("Date, test\n2012-01-01, 1\n,2")
result = self.read_csv(s, parse_dates=["Date"], na_filter=False)
self.assertTrue(result['Date'].isnull()[1])
def test_usecols(self):
raise nose.SkipTest(
"Usecols is not supported in C High Memory engine.")
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
# check with delim_whitespace=True
df = self.read_csv(StringIO(data.replace(',', ' ')), comment='#',
delim_whitespace=True)
tm.assert_almost_equal(df.values, expected)
# check with custom line terminator
df = self.read_csv(StringIO(data.replace('\n', '*')), comment='#',
lineterminator='*')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_skiprows_lineterminator(self):
# GH #9079
data = '\n'.join(['SMOSMANIA ThetaProbe-ML2X ',
'2007/01/01 01:00 0.2140 U M ',
'2007/01/01 02:00 0.2141 M O ',
'2007/01/01 04:00 0.2142 D M '])
expected = pd.DataFrame([['2007/01/01', '01:00', 0.2140, 'U', 'M'],
['2007/01/01', '02:00', 0.2141, 'M', 'O'],
['2007/01/01', '04:00', 0.2142, 'D', 'M']],
columns=['date', 'time', 'var', 'flag',
'oflag'])
# test with the three default lineterminators LF, CR and CRLF
df = self.read_csv(StringIO(data), skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data.replace('\n', '\r\n')),
skiprows=1, delim_whitespace=True,
names=['date', 'time', 'var', 'flag', 'oflag'])
tm.assert_frame_equal(df, expected)
def test_trailing_spaces(self):
data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n"
expected = pd.DataFrame([[1., 2., 4.],
[5.1, np.nan, 10.]])
# this should ignore six lines including lines with trailing
# whitespace and blank lines. issues 8661, 8679
df = self.read_csv(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data.replace(',', ' ')),
header=None, delim_whitespace=True,
skiprows=[0, 1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
# test skipping set of rows after a row with trailing spaces, issue
# #8983
expected = pd.DataFrame({"A": [1., 5.1], "B": [2., np.nan],
"C": [4., 10]})
df = self.read_table(StringIO(data.replace(',', ' ')),
delim_whitespace=True,
skiprows=[1, 2, 3, 5, 6], skip_blank_lines=True)
tm.assert_frame_equal(df, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_empty_lines(self):
data = """\
A,B,C
1,2.,4.
5.,NaN,10.0
-70,.4,1
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
df = self.read_csv(StringIO(data.replace(',', ' ')), sep='\s+')
tm.assert_almost_equal(df.values, expected)
expected = [[1., 2., 4.],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[5., np.nan, 10.],
[np.nan, np.nan, np.nan],
[-70., .4, 1.]]
df = self.read_csv(StringIO(data), skip_blank_lines=False)
tm.assert_almost_equal(list(df.values), list(expected))
def test_whitespace_lines(self):
data = """
\t \t\t
\t
A,B,C
\t 1,2.,4.
5.,NaN,10.0
"""
expected = [[1, 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
# empty frame
# GH12048
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_dtype_and_names_error(self):
# GH 8833
# passing both dtype and names resulting in an error reporting issue
data = """
1.0 1
2.0 2
3.0 3
"""
# base cases
result = self.read_csv(StringIO(data), sep='\s+', header=None)
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), sep='\s+',
header=None, names=['a', 'b'])
expected = DataFrame(
[[1.0, 1], [2.0, 2], [3.0, 3]], columns=['a', 'b'])
tm.assert_frame_equal(result, expected)
# fallback casting
result = self.read_csv(StringIO(
data), sep='\s+', header=None, names=['a', 'b'], dtype={'a': np.int32})
expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
tm.assert_frame_equal(result, expected)
data = """
1.0 1
nan 2
3.0 3
"""
# fallback casting, but not castable
with tm.assertRaisesRegexp(ValueError, 'cannot safely convert'):
self.read_csv(StringIO(data), sep='\s+', header=None,
names=['a', 'b'], dtype={'a': np.int32})
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
class TestCParserLowMemory(CParserTests, tm.TestCase):
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_csv(*args, **kwds)
def read_table(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'c'
kwds['low_memory'] = True
kwds['buffer_lines'] = 2
return read_table(*args, **kwds)
def test_compact_ints(self):
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.to_records(index=False).dtype, ex_dtype)
def test_compact_ints_as_recarray(self):
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
data = ('0,1,0,0\n'
'1,1,0,0\n'
'0,1,0,1')
result = read_csv(StringIO(data), delimiter=',', header=None,
compact_ints=True, as_recarray=True)
ex_dtype = np.dtype([(str(i), 'i1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
result = read_csv(StringIO(data), delimiter=',', header=None,
as_recarray=True, compact_ints=True,
use_unsigned=True)
ex_dtype = np.dtype([(str(i), 'u1') for i in range(4)])
self.assertEqual(result.dtype, ex_dtype)
def test_precise_conversion(self):
# GH #8002
tm._skip_if_32bit()
from decimal import Decimal
normal_errors = []
precise_errors = []
for num in np.linspace(1., 2., num=500): # test numbers between 1 and 2
text = 'a\n{0:.25}'.format(num) # 25 decimal digits of precision
normal_val = float(self.read_csv(StringIO(text))['a'][0])
precise_val = float(self.read_csv(
StringIO(text), float_precision='high')['a'][0])
roundtrip_val = float(self.read_csv(
StringIO(text), float_precision='round_trip')['a'][0])
actual_val = Decimal(text[2:])
def error(val):
return abs(Decimal('{0:.100}'.format(val)) - actual_val)
normal_errors.append(error(normal_val))
precise_errors.append(error(precise_val))
# round-trip should match float()
self.assertEqual(roundtrip_val, float(text[2:]))
self.assertTrue(sum(precise_errors) <= sum(normal_errors))
self.assertTrue(max(precise_errors) <= max(normal_errors))
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'object')
def test_pass_dtype_as_recarray(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
if compat.is_platform_windows():
raise nose.SkipTest(
"segfaults on win-64, only when all tests are run")
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'},
as_recarray=True)
self.assertEqual(result['one'].dtype, 'u1')
self.assertEqual(result['two'].dtype, 'S1')
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'), np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={'one': 'u1'})
expected = pd.concat([Series([], name='one', dtype='u1')] * 2, axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
### FIXME in GH9424
raise nose.SkipTest(
"GH 9424; known failure read_csv with duplicate columns")
data = 'one,one'
result = self.read_csv(
StringIO(data), mangle_dupe_cols=False, dtype={0: 'u1', 1: 'f'})
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one', dtype='f')], axis=1)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_usecols_dtypes(self):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(0, 1, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
result2 = self.read_csv(StringIO(data), usecols=(0, 2),
names=('a', 'b', 'c'),
header=None,
converters={'a': str},
dtype={'b': int, 'c': float},
)
self.assertTrue((result.dtypes == [object, np.int, np.float]).all())
self.assertTrue((result2.dtypes == [object, np.float]).all())
def test_usecols_implicit_index_col(self):
# #2654
data = 'a,b,c\n4,apple,bat,5.7\n8,orange,cow,10'
result = self.read_csv(StringIO(data), usecols=['a', 'b'])
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(self):
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
result = self.read_csv(StringIO(data), delim_whitespace=True,
usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(self):
# #2733
data = 'a b c\n4 apple bat 5.7\n8 orange cow 10'
df = self.read_csv(StringIO(data), sep='\s+', usecols=('a', 'b'))
expected = DataFrame({'a': ['apple', 'orange'],
'b': ['bat', 'cow']}, index=[4, 8])
tm.assert_frame_equal(df, expected)
def test_pure_python_failover(self):
data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"
result = self.read_csv(StringIO(data), comment='#')
expected = DataFrame({'a': [1, 4], 'b': [2, 5], 'c': [3, 6]})
tm.assert_frame_equal(result, expected)
def test_decompression(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='gzip')
tm.assert_frame_equal(result, expected)
result = self.read_csv(open(path, 'rb'), compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
result = self.read_csv(path, compression='bz2')
tm.assert_frame_equal(result, expected)
# result = self.read_csv(open(path, 'rb'), compression='bz2')
# tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
with open(path, 'rb') as fin:
if compat.PY3:
result = self.read_csv(fin, compression='bz2')
tm.assert_frame_equal(result, expected)
else:
self.assertRaises(ValueError, self.read_csv,
fin, compression='bz2')
def test_decompression_regex_sep(self):
try:
import gzip
import bz2
except ImportError:
raise nose.SkipTest('need gzip and bz2 to run')
data = open(self.csv1, 'rb').read()
data = data.replace(b',', b'::')
expected = self.read_csv(self.csv1)
with tm.ensure_clean() as path:
tmp = gzip.GzipFile(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
# Test currently only valid with the python engine because of
# regex sep. Temporarily copied to TestPythonParser.
# Here test for ValueError when passing regex sep:
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='gzip')
tm.assert_frame_equal(result, expected)
with tm.ensure_clean() as path:
tmp = bz2.BZ2File(path, mode='wb')
tmp.write(data)
tmp.close()
# GH 6607
with tm.assertRaisesRegexp(ValueError, 'regex sep'): # XXX
result = self.read_csv(path, sep='::', compression='bz2')
tm.assert_frame_equal(result, expected)
self.assertRaises(ValueError, self.read_csv,
path, compression='bz3')
def test_memory_map(self):
# it works!
result = self.read_csv(self.csv1, memory_map=True)
def test_disable_bool_parsing(self):
# #2090
data = """A,B,C
Yes,No,Yes
No,Yes,Yes
Yes,,Yes
No,No,No"""
result = read_csv(StringIO(data), dtype=object)
self.assertTrue((result.dtypes == object).all())
result = read_csv(StringIO(data), dtype=object, na_filter=False)
self.assertEqual(result['B'][2], '')
def test_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
df2 = self.read_csv(StringIO(data), sep=';', decimal=',')
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_custom_lineterminator(self):
data = 'a,b,c~1,2,3~4,5,6'
result = self.read_csv(StringIO(data), lineterminator='~')
expected = self.read_csv(StringIO(data.replace('~', '\n')))
tm.assert_frame_equal(result, expected)
data2 = data.replace('~', '~~')
result = self.assertRaises(ValueError, read_csv, StringIO(data2),
lineterminator='~~')
def test_raise_on_passed_int_dtype_with_nas(self):
# #2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
self.assertRaises(Exception, read_csv, StringIO(data), sep=",",
skipinitialspace=True,
dtype={'DOY': np.int64})
def test_na_trailing_columns(self):
data = """Date,Currenncy,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
result = self.read_csv(StringIO(data))
self.assertEqual(result['Date'][1], '2012-05-12')
self.assertTrue(result['UnitPrice'].isnull().all())
def test_parse_ragged_csv(self):
data = """1,2,3
1,2,3,4
1,2,3,4,5
1,2
1,2,3,4"""
nice_data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
result = self.read_csv(StringIO(data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
expected = self.read_csv(StringIO(nice_data), header=None,
names=['a', 'b', 'c', 'd', 'e'])
tm.assert_frame_equal(result, expected)
# too many columns, cause segfault if not careful
data = "1,2\n3,4,5"
result = self.read_csv(StringIO(data), header=None,
names=lrange(50))
expected = self.read_csv(StringIO(data), header=None,
names=lrange(3)).reindex(columns=lrange(50))
tm.assert_frame_equal(result, expected)
def test_tokenize_CR_with_quoting(self):
# #3453, this doesn't work with Python parser for some reason
data = ' a,b,c\r"a,b","e,d","f,f"'
result = self.read_csv(StringIO(data), header=None)
expected = self.read_csv(StringIO(data.replace('\r', '\n')),
header=None)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data))
expected = self.read_csv(StringIO(data.replace('\r', '\n')))
tm.assert_frame_equal(result, expected)
def test_raise_on_no_columns(self):
# single newline
data = "\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
# test with more than a single newline
data = "\n\n\n"
self.assertRaises(ValueError, self.read_csv, StringIO(data))
def test_warn_if_chunks_have_mismatched_type(self):
# Issue #3866 If chunks are different types and can't
# be coerced using numerical types, then issue warning.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(DtypeWarning):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_invalid_c_parser_opts_with_not_c_parser(self):
from pandas.io.parsers import _c_parser_defaults as c_defaults
data = """1,2,3,,
1,2,3,4,
1,2,3,4,5
1,2,,,
1,2,3,4,"""
engines = 'python', 'python-fwf'
for default in c_defaults:
for engine in engines:
kwargs = {default: object()}
with tm.assertRaisesRegexp(ValueError,
'The %r option is not supported '
'with the %r engine' % (default,
engine)):
read_csv(StringIO(data), engine=engine, **kwargs)
def test_passing_dtype(self):
# GH 6607
# This is a copy which should eventually be merged into ParserTests
# when the dtype argument is supported by all engines.
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path, dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
def test_fallback_to_python(self):
# GH 6607
data = 'a b c\n1 2 3'
# specify C engine with C-unsupported options (raise)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep=None,
delim_whitespace=False)
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', sep='\s')
with tm.assertRaisesRegexp(ValueError, 'does not support'):
self.read_table(StringIO(data), engine='c', skip_footer=1)
def test_raise_on_sep_with_delim_whitespace(self):
# GH 6607
data = 'a b c\n1 2 3'
with tm.assertRaisesRegexp(ValueError, 'you can only specify one'):
self.read_table(StringIO(data), sep='\s', delim_whitespace=True)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), delim_whitespace=True,
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), lineterminator='\n',
skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with tm.assertRaises(TypeError):
pd.read_csv(StringIO(data), header=arg)
with tm.assertRaises(TypeError):
pd.read_table(StringIO(data), header=arg)
# import pandas and load the nls data
import pandas as pd
pd.set_option('display.width', 80)
pd.set_option('display.max_columns', 7)
pd.set_option('display.max_rows', 100)
pd.options.display.float_format = '{:,.0f}'.format
nls97 = pd.read_csv("data/nls97f.csv")
nls97.set_index("personid", inplace=True)
nls97add = pd.read_csv("data/nls97add.csv")
# look at some of the nls data
nls97.head()
nls97.shape
nls97add.head()
nls97add.shape
# check for unique ids
nls97.originalid.nunique()==nls97.shape[0]
nls97add.originalid.nunique()==nls97add.shape[0]
# create some mismatched ids
nls97 = nls97.sort_values('originalid')
nls97add = nls97add.sort_values('originalid')
nls97.iloc[0:2, -1] = nls97.iloc[0:2, -1] + 10000
nls97.originalid.head(2)
nls97add.iloc[0:2, 0] = nls97add.iloc[0:2, 0] + 20000
nls97add.originalid.head(2)
# use join to do a left join
nlsnew = nls97.join(nls97add.set_index(['originalid']))
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]
# do a left join with merge
nlsnew = pd.merge(nls97, nls97add, on=['originalid'], how="left")
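# Added sketch (not in the original script): inspect the merge result the same
# way as the join result above; with a left merge, the rows whose ids were
# deliberately mismatched should show missing values in the columns that come
# from nls97add.
nlsnew.loc[nlsnew.originalid>9999, ['originalid','gender','birthyear','motherage','parentincome']]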
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 12:15:22 2019
@author: seanselzer
"""
import pandas as pd
import os
import numpy as np
import datetime
import re
from pathlib import Path
from detector_efficiencies_2_1 import create_effdf
from radecc_reader_lvl1_2_0 import slope_calculator
#____________________________________________________________________________________________________________________________________________________________
def amalgam_2(eff_df, ra223_lambda, ra224_lambda, log_df, sample_volume, sample_volume_error, sample_variable, sub_sample_variable,
spike_sensitivity, equilibration_time_variable, output_directory, sample_mid_time, sample_mid_date,
linear_data_type, DDMMYYY_DateFormat, thstd, acstd, blank, detector_dict, detector_226_efficiencies_dict ):
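# Added summary comment (not in the original source): amalgam_2 walks the
# 'Read_Files' output tree to match each log entry (by sample and, when
# linear_data_type is False, also by sub-sample) to its read files, runs
# slope_calculator on every matched file, and then converts the blank- and
# efficiency-corrected count rates into dpm and volume-normalised dpm values.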
main_samplelist = []
print('\n---Creating sample results dataframe---\n')
print(linear_data_type)
if linear_data_type == False:
for i in range(len(log_df[sample_variable])):
sample_series = log_df[sample_variable].iloc[i]
print('\n', type(sample_series), sample_series)
for dirName, subdirList, fileList in os.walk(output_directory/'Read_Files'/sample_series):
dirName = Path(dirName)
#print(int(log_df[sub_sample_variable].iloc[i]), [int(s) for s in re.findall(r'-?\d+\.?\d*',dirName.split('/')[-1])][0],dirName.split('/')[-1])
# print ('££££££££',log_df[sub_sample_variable].iloc[i], dirName.parts)
if str(log_df[sub_sample_variable].iloc[i])in dirName.parts[-1]:
for file in fileList:
# print (file, sample_series, log_df[sub_sample_variable].iloc[i])
main_samplelist.append(list(log_df.iloc[i])+[os.path.join(dirName, file)])
else:
sample_set = list(set(log_df[sample_variable]))
#print(sample_set)
for i in range(len(log_df[sample_variable])):
sample_series = log_df[sample_variable].iloc[i]
print('\n', sample_series)
for dirName, subdirList, fileList in os.walk(output_directory/'Read_Files'/sample_series):
dirName = Path(dirName)
print (log_df[sample_variable].iloc[i], sample_series, dirName.parts[-1].lower(), '\n', dirName)
if str(log_df[sample_variable].iloc[i]) in dirName.parts[-1].lower():
for file in fileList:
print (file, log_df[sample_variable].iloc[i],sample_series)
main_samplelist.append(list(log_df.iloc[i])+[os.path.join(dirName, file)])
sample_array = np.array(main_samplelist)
temp_df = pd.DataFrame(sample_array)
temp_df.columns = log_df.columns.values.tolist()+['Filepath']
lvl1_calc_list = []
for i in range(len(temp_df[sample_variable])):
lvl1_calc_list.append(slope_calculator(output_directory, detector_dict, Path(temp_df['Filepath'].iloc[i]), spike_sensitivity, equilibration_time_variable, DDMMYYY_DateFormat, thstd, acstd, blank))
lvl1_calc_df = pd.DataFrame(lvl1_calc_list, columns = ['Read_Start_Time', 'Read_End_Time', 'Slope', 'stderr_slope', 'cnt219', 'cnt219_abserr', 'cnt220', 'cnt220_abserr', 'cpm_219', 'err_219', 'cpm_220', 'err_220', 'cpm_Tot', 'err_Tot', 'y219cc', 'y219cc_err',
'y220cc', 'y220cc_err', 'corr219', 'corr219_err', 'corr220', 'corr220_err','final219',
'final220', 'Read_Runtime', 'final219_err', 'final220_err', 'cntTot_abserr',
'errslope_abs', 'Detector_Name', 'Cartridge_Type', 'Filename_Read_Interval', 'Spike_Value', 'Error_List'])
lvl1_main_df = pd.concat([temp_df, lvl1_calc_df], axis=1)
# print(pd.to_datetime(log_df.Date+' '+log_df[sample_mid_time], dayfirst = True))
lvl1_main_df['Mid_Sample_Datetime'] = pd.to_datetime(lvl1_main_df[sample_mid_date]+' '+lvl1_main_df[sample_mid_time], dayfirst = DDMMYYY_DateFormat)
# print(lvl1_main_df['Mid_Sample_Datetime']-pd.to_datetime(log_df.Date+' '+log_df[sample_mid_time], dayfirst = True))
lvl1_main_df['Mid_Read_Datetime'] = pd.to_datetime(lvl1_main_df['Read_Start_Time'], dayfirst=DDMMYYY_DateFormat) + pd.to_timedelta(lvl1_main_df['Read_Runtime']/2, unit='m')
detector_226_calibration_values_list = []
detector_226_efficiencies_list = []
for detector in lvl1_main_df.Detector_Name:
if detector in detector_dict.keys():
detector_226_calibration_values_list.append(detector_dict[detector])
detector_226_efficiencies_list.append(detector_226_efficiencies_dict[detector])
else:
detector_226_calibration_values_list.append(-999)
detector_226_efficiencies_list.append(-999)
lvl1_main_df['Detector 226 Calibration Factor'] = detector_226_calibration_values_list
lvl1_main_df['Detector 226 Efficiency'] = detector_226_efficiencies_list
blankcorr219 = []
blankcorr219_err =[]
blankcorr220 = []
blankcorr220_err = []
dpm219 = []
dpm219_err = []
dpm219_thstdonly = []
dpm219_thstdonly_err = []
dpm220 = []
dpm220_err = []
dpm220tot = []
vdpm219 = []
vdpm219_err = []
vdpm219_thstdonly = []
vdpm219_thstdonly_err = []
vdpm220 = []
vdpm220_err = []
vdpm220tot = []
t1 = []
vdpm226 = []
vdpm226_err = []
for i in range(len(lvl1_main_df)):
if lvl1_main_df['Detector_Name'][i].lower() in list(eff_df.Detector):
#final219 - 219 channel blank for relevant detector
blankcorr219.append(float(lvl1_main_df.final219[i] - eff_df.Average_bkg_219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]))
#error associated with blankcorr219 correction added
blankcorr219_err.append(float(np.sqrt(eff_df.Standard_Deviation_Blank_219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]**2 + lvl1_main_df['final219_err'][i]**2)))
#final220 - 220 channel blank for relevant detector
blankcorr220.append(float(lvl1_main_df.final220[i] - eff_df.Average_bkg_220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]))
#error associated with blankcorr220 correction added
blankcorr220_err.append(float(np.sqrt(eff_df.Standard_Deviation_Blank_220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]**2 + lvl1_main_df['final220_err'][i]**2)))
#dpm219 calculation
dpm219.append(float(blankcorr219[-1]/eff_df.Average_E219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]))
#dpm219 error
dpm219_err.append(float((np.sqrt((blankcorr219_err[-1]/eff_df.Average_E219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])**2 + ((lvl1_main_df['final219'][i]*eff_df.Standard_Deviation_E219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])/eff_df.Average_E219[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]**2)**2))))
#dpm220 calculation
dpm220.append(float(blankcorr220[-1]/eff_df.Average_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]))
#dpm220 error
dpm220_err.append(float((np.sqrt((blankcorr220_err[-1]/eff_df.Average_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])**2 + ((lvl1_main_df['final220'][i]*eff_df.Standard_Deviation_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])/eff_df.Average_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]**2)**2))))
##############################################
#dpm219_thstdonly calculation using the 219 efficiency derived from E220 (E219_from_E220)
dpm219_thstdonly.append(float(blankcorr219[-1]/eff_df.E219_from_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]))
#dpm219_thstdonly error
dpm219_thstdonly_err.append(float((np.sqrt((blankcorr219_err[-1]/eff_df.E219_from_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])**2 + ((lvl1_main_df['final219'][i]*eff_df.E219_from_E220_uncertainty[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()])/eff_df.E219_from_E220[eff_df.Detector == lvl1_main_df['Detector_Name'][i].lower()]**2)**2))))
################################################
if float(lvl1_main_df[sample_volume][i]) > 0:
#219 Volume corrections
vdpm219.append((dpm219[-1]/float(lvl1_main_df[sample_volume][i]))*1000)
#219 Volume corrections error
vdpm219_err.append(np.sqrt((dpm219_err[-1]/float(lvl1_main_df[sample_volume][i]))**2 + ((dpm219[-1]*float(lvl1_main_df[sample_volume_error][i]))/(float(lvl1_main_df[sample_volume][i])**2))**2)*1000)
#220 Volume corrections
vdpm220.append((dpm220[-1]/float(lvl1_main_df[sample_volume][i]))*1000)
#220 Volume corrections error
vdpm220_err.append(np.sqrt((dpm220_err[-1]/float(lvl1_main_df[sample_volume][i]))**2 + ((dpm220[-1]*float(lvl1_main_df[sample_volume_error][i]))/(float(lvl1_main_df[sample_volume][i])**2))**2)*1000)
###############################################################################
#dpm219_thstdonly Volume corrections
vdpm219_thstdonly.append((dpm219_thstdonly[-1]/float(lvl1_main_df[sample_volume][i]))*1000)
#dpm219_thstdonly_err Volume corrections error
vdpm219_thstdonly_err.append(np.sqrt((dpm219_thstdonly_err[-1]/float(lvl1_main_df[sample_volume][i]))**2 + ((dpm219_thstdonly[-1]*float(lvl1_main_df[sample_volume_error][i]))/(float(lvl1_main_df[sample_volume][i])**2))**2)*1000)
if lvl1_main_df['Slope'][i] > -999:
#dpm226 calculation
vdpm226.append(((lvl1_main_df['Slope'][i]/lvl1_main_df['Detector 226 Calibration Factor'][i])/float(lvl1_main_df[sample_volume][i]))*1000)
vdpm226[-1] = vdpm226[-1]/float(lvl1_main_df['Detector 226 Efficiency'][i])
#dpm226_err calculation
vdpm226_err.append(np.sqrt(((lvl1_main_df['stderr_slope'][i]/lvl1_main_df['Detector 226 Calibration Factor'][i])/float(lvl1_main_df[sample_volume][i]))**2 + (((lvl1_main_df['Slope'][i]/lvl1_main_df['Detector 226 Calibration Factor'][i])*float(lvl1_main_df[sample_volume_error][i]))/(float(lvl1_main_df[sample_volume][i])**2))**2)*1000)
else:
vdpm226.append(-999)
vdpm226_err.append(-999)
else:
vdpm219.append(-999)
vdpm219_err.append(-999)
vdpm220.append(-999)
vdpm220_err.append(-999)
vdpm219_thstdonly.append(-999)
vdpm219_thstdonly_err.append(-999)
vdpm226.append(-999)
vdpm226_err.append(-999)
# if float(vdpm219_thstdonly[-1]) < 0 :
# vdpm219_thstdonly[-1] = 0
###############################################################################
#Time difference between sampling datetime and read datetime (t1 in Garcia-Solsona)
diff = pd.to_datetime(lvl1_main_df['Mid_Read_Datetime'][i]) - pd.to_datetime(lvl1_main_df['Mid_Sample_Datetime'][i])
t1.append((diff.seconds/60)+diff.days*24*60)
else:
print ('\n***ERROR***\nDetector name:', lvl1_main_df['Detector_Name'][i].lower(),'does not match a detector in detector_list\n')
#final219 - 219 channel blank for relevant detector
blankcorr219.append(-999)
#error associated with blankcorr219 correction added
blankcorr219_err.append(-999)
#final220 - 220 channel blank for relevant detector
blankcorr220.append(-999)
#error associated with blankcorr220 correction added
blankcorr220_err.append(-999)
#dpm219 calculation
dpm219.append(-999)
#dpm219 error
dpm219_err.append(-999)
#dpm220 calculation
dpm220.append(-999)
#dpm220 error
dpm220_err.append(-999)
#219 Volume corrections
vdpm219.append(-999)
#219 Volume corrections error
vdpm219_err.append(-999)
if vdpm219[-1] < 0 :
vdpm219[-1] = 0
#220 Volume corrections
vdpm220.append(-999)
#220 Volume corrections error
vdpm220_err.append(-999)
if vdpm220[-1] < 0 :
vdpm220[-1] = 0
##############################################
#dpm219_thstdonly calculation using E219_from_E220 (placeholder for unknown detector)
dpm219_thstdonly.append(-999)
#dpm219_thstdonly error
dpm219_thstdonly_err.append(-999)
################################################
###############################################################################
#219 Volume corrections
vdpm219_thstdonly.append(-999)
#219 Volume corrections error
vdpm219_thstdonly_err.append(-999)
if float(vdpm219_thstdonly[-1]) < 0 :
vdpm219_thstdonly[-1] = 0
###############################################################################
if float(lvl1_main_df[sample_volume][i]) > 0:
vdpm226.append(-999)
if vdpm226[-1]<0:
vdpm226[-1] = 0
vdpm226_err.append(-999)
else:
vdpm226.append(-999)
vdpm226_err.append(-999)
#Time difference between sampling datetime and read datetime (t1 in Garcia-Solsona)
            diff = pd.to_datetime(lvl1_main_df['Mid_Read_Datetime'][i]) - pd.to_datetime(lvl1_main_df['Mid_Sample_Datetime'][i])
from timeseries_preparation.preparation import TimeseriesPreparator
import pandas as pd
import pytest
def test_duplicate_dates():
df = pd.DataFrame(
{
"date": [
"2021-01-01 12:12:00",
"2021-01-01 17:35:00",
"2021-01-02 14:55:00",
],
"id": [1, 1, 1],
}
)
frequency = "D"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
)
with pytest.raises(ValueError):
dataframe_prepared = preparator._truncate_dates(df)
def test_minutes_truncation():
df = pd.DataFrame(
{
"date": [
"2021-01-01 12:17:42",
"2021-01-01 12:30:00",
"2021-01-01 12:46:00",
],
"id": [1, 1, 1],
}
)
frequency = "15min"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2021-01-01 12:15:00")
assert dataframe_prepared[time_column_name][2] == pd.Timestamp("2021-01-01 12:45:00")
def test_hour_truncation():
df = pd.DataFrame(
{
"date": [
"2020-01-07 12:12:00",
"2020-01-07 17:35:00",
"2020-01-07 14:55:00",
"2020-01-07 18:06:00",
"2020-01-08 04:40:00",
"2020-01-08 06:13:00",
"2020-01-08 03:23:00",
],
"id": [1, 1, 1, 1, 2, 2, 2],
}
)
frequency = "2H"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
max_timeseries_length=2,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
dataframe_prepared = preparator._keep_last_dates(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2020-01-07 16:00:00")
assert dataframe_prepared[time_column_name][3] == pd.Timestamp("2020-01-08 06:00:00")
def test_day_truncation():
df = pd.DataFrame(
{
"date": [
"2021-01-01 12:17:42",
"2021-01-02 00:00:00",
"2021-01-03 12:46:00",
],
"id": [1, 1, 1],
}
)
frequency = "D"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2021-01-01")
assert dataframe_prepared[time_column_name][2] == pd.Timestamp("2021-01-03")
def test_business_day_truncation():
df = pd.DataFrame(
{
"date": [
"2021-01-04 12:17:42",
"2021-01-07 00:00:00",
"2021-01-12 12:46:00",
],
"id": [1, 1, 1],
}
)
frequency = "3B"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2021-01-04")
assert dataframe_prepared[time_column_name][1] == pd.Timestamp("2021-01-07")
def test_week_sunday_truncation():
df = pd.DataFrame(
{
"date": [
"2021-01-03 12:12:00",
"2021-01-05 17:35:00",
"2021-01-15 14:55:00",
],
"id": [1, 1, 1],
}
)
frequency = "W-SUN"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
max_timeseries_length=2,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
dataframe_prepared = preparator._keep_last_dates(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2021-01-10")
assert dataframe_prepared[time_column_name][1] == pd.Timestamp("2021-01-17")
def test_quarter_truncation():
df = pd.DataFrame(
{
"date": [
"2020-12-15",
"2021-03-28",
"2021-06-11",
],
"id": [1, 1, 1],
}
)
frequency = "3M"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2020-12-31")
assert dataframe_prepared[time_column_name][2] == pd.Timestamp("2021-06-30")
def test_semester_truncation():
df = pd.DataFrame(
{
"date": [
"2020-12-15",
"2021-06-28",
"2021-12-01",
],
"id": [1, 1, 1],
}
)
frequency = "6M"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2020-12-31")
assert dataframe_prepared[time_column_name][1] == pd.Timestamp("2021-06-30")
assert dataframe_prepared[time_column_name][2] == pd.Timestamp("2021-12-31")
def test_year_truncation():
df = pd.DataFrame(
{
"date": [
"2020-12-31",
"2021-12-15",
"2022-12-01",
],
"id": [1, 1, 1],
}
)
frequency = "12M"
time_column_name = "date"
timeseries_identifiers_names = ["id"]
df[time_column_name] = pd.to_datetime(df[time_column_name]).dt.tz_localize(tz=None)
preparator = TimeseriesPreparator(
time_column_name=time_column_name,
frequency=frequency,
timeseries_identifiers_names=timeseries_identifiers_names,
)
dataframe_prepared = preparator._truncate_dates(df)
dataframe_prepared = preparator._sort(dataframe_prepared)
preparator._check_regular_frequency(dataframe_prepared)
    assert dataframe_prepared[time_column_name][0] == pd.Timestamp("2020-12-31")
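
# A minimal sketch (not part of the original test-suite) of the plain-pandas flooring
# behaviour that the truncation tests above mirror; the helper name is illustrative
# only, and the TimeseriesPreparator internals are not implied to use these exact calls.
def _pandas_floor_sketch():
    ts = pd.Timestamp("2021-01-01 12:17:42")
    assert ts.floor("15min") == pd.Timestamp("2021-01-01 12:15:00")  # cf. test_minutes_truncation
    assert ts.floor("D") == pd.Timestamp("2021-01-01")               # cf. test_day_truncation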
import datetime
import os
import time
import pandas as pd
from mainapp.app_settings import datetime_format
def tell_to_stop():
f = open("mainapp/termination/stop_dir/stop_instrument_simulator.txt", 'w')
f.close()
def time_to_stop(stopping_dir, stopping_file):
files = next(os.walk(stopping_dir))[2]
if stopping_file in files:
os.remove(stopping_dir + '\\' + stopping_file)
return True
return False
def clean_up_stopping_dir(stopping_dir, stopping_file):
files = next(os.walk(stopping_dir))[2]
for file in files:
if file == stopping_file:
os.remove(stopping_dir + '\\' + stopping_file)
def run(measurements_per_second=None):
stopping_dir = os.getcwd() + '\\mainapp\\termination\\stop_dir'
stopping_file = 'stop_instrument_simulator.txt'
clean_up_stopping_dir(stopping_dir, stopping_file)
wait_between_measurements = True
if measurements_per_second is None:
wait_between_measurements = False
relative_location = "\\mainapp\\instrument_simulator\\projects\\"
dir = os.getcwd() + relative_location
project_name = "ObsFlow1DaysFakeTime.csv"
# Setting directories
projectName = project_name
file = dir + projectName
dir_temp = 'incoming_data'
# Getting the project file
    df = pd.read_csv(file, index_col=False)
# Import packages
import os
import pandas as pd
import scipy
from scipy.optimize import curve_fit
import hplib as hpl
# Functions
def import_heating_data():
# read in keymark data from *.txt files in /input/txt/
# save a dataframe to database_heating.csv in folder /output/
Modul = []
Manufacturer = []
Date = []
Refrigerant = []
Mass = []
Poff = []
Psb = []
Prated = []
SPLindoor = []
SPLoutdoor = []
Type = []
Climate = []
Guideline = []
T_in = []
T_out = []
P_th = []
COP = []
df = pd.DataFrame()
os.chdir('../')
root = os.getcwd()
Scanordner = (root + '/input/txt')
os.chdir(Scanordner)
Scan = os.scandir(os.getcwd())
with Scan as dir1:
for file in dir1:
with open(file, 'r', encoding='utf-8') as f:
contents = f.readlines()
date = 'NaN'
modul = 'NaN'
prated_low = 'NaN'
prated_medium = 'NaN'
heatpumpType = 'NaN'
refrigerant = 'NaN'
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
poff = 'NaN'
climate = 'NaN'
NumberOfTestsPerNorm = []
NumberOfTestsPerModule = []
                i = 1 # indicator for the line which is read
d = 0 # indicator if only medium Temperature is given
p = 0 # -15° yes or no
date = contents[1]
date = date[61:]
if (date == '17 Dec 2020\n'):
date = '17.12.2020\n'
if (date == '18 Dec 2020\n'):
date = '18.12.2020\n'
if (date.startswith('5 Mar 2021')):
date = '05.03.2021\n'
if (date.startswith('15 Feb 2021')):
date = '15.02.2021\n'
if (date.startswith('22 Feb 2021')):
date = '22.02.2021\n'
for lines in contents:
i = i + 1
if (lines.startswith('Name\n') == 1):
manufacturer = (contents[i])
if (manufacturer.find('(') > 0):
manufacturer = manufacturer.split('(', 1)[1].split('\n')[0]
if manufacturer.endswith('GmbH\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.p.A.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.p.a.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.p.A\n'):
manufacturer = manufacturer[:-5]
if manufacturer.endswith('S.L.U.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('s.r.o.\n'):
manufacturer = manufacturer[:-6]
if manufacturer.endswith('S.A.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('S.L.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('B.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('N.V.\n'):
manufacturer = manufacturer[:-4]
if manufacturer.endswith('GmbH & Co KG\n'):
manufacturer = manufacturer[:-12]
elif manufacturer.startswith('NIBE'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Nibe'):
manufacturer = 'Nibe\n'
elif manufacturer.startswith('Mitsubishi'):
manufacturer = 'Mitsubishi\n'
elif manufacturer.startswith('Ochsner'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('OCHSNER'):
manufacturer = 'Ochsner\n'
elif manufacturer.startswith('Viessmann'):
manufacturer = 'Viessmann\n'
elif (lines.endswith('Date\n') == 1):
date = (contents[i])
if (date == 'basis\n'):
date = contents[i - 3]
date = date[14:]
elif (lines.startswith('Model') == 1):
modul = (contents[i - 2])
splindoor_low = 'NaN'
splindoor_medium = 'NaN'
sploutdoor_low = 'NaN'
sploutdoor_medium = 'NaN'
elif lines.endswith('Type\n'):
heatpumpType = contents[i][:-1]
if heatpumpType.startswith('A'):
heatpumpType = 'Outdoor Air/Water'
if heatpumpType.startswith('Eau glycol'):
heatpumpType = 'Brine/Water'
elif (lines.startswith('Sound power level indoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Sound power level outdoor')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Puissance acoustique extérieure')):
b = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Potencia sonora de la unidad exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora interior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Nivel de Potência sonora exterior')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica interna')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
splindoor_low = contents[i + 4][:-7]
splindoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
splindoor_medium = contents[i + 4][:-7]
splindoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
splindoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
splindoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
splindoor_medium = contents[i + 2][:-7]
else:
splindoor_low = contents[i][:-7]
splindoor_medium = contents[i][:-7]
elif (lines.startswith('Livello di potenza acustica externa')):
SPL = 1
if (contents[i].startswith('Low')):
if contents[i + 2].startswith('Medium'):
sploutdoor_low = contents[i + 4][:-7]
sploutdoor_medium = contents[i + 6][:-7]
if contents[i].startswith('Medium'):
sploutdoor_medium = contents[i + 4][:-7]
sploutdoor_low = contents[i + 6][:-7]
elif (contents[i].endswith('dB(A)\n')):
if (contents[i - 3].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 3].startswith('Medium')):
sploutdoor_medium = contents[i][:-7]
if (contents[i - 6].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 6].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
if (contents[i - 4].startswith('Low')):
sploutdoor_low = contents[i][:-7]
if (contents[i - 4].startswith('Medium')):
sploutdoor_medium = contents[i + 2][:-7]
else:
sploutdoor_low = contents[i][:-7]
sploutdoor_medium = contents[i][:-7]
elif (lines == 'Refrigerant\n'):
if (contents[i - 3] == 'Mass Of\n'):
continue
refrigerant = (contents[i])
elif (lines.startswith('Mass Of') == 1):
if (lines == 'Mass Of\n'):
mass = contents[i + 1]
elif (lines.endswith('kg\n') == 1):
mass = contents[i - 2]
mass = mass[20:]
else:
mass = contents[i]
elif lines.startswith('Average'):
climate = 'average'
elif lines.startswith('Cold'):
climate = 'cold'
elif lines.startswith('Warmer Climate'):
climate = 'warm'
elif (lines.startswith('EN') == 1):
if (p == 1):
Poff.append(poff)
Psb.append(psb)
if (p == 2):
Poff.append(poff)
Poff.append(poff)
Psb.append(psb)
Psb.append(psb_medium)
guideline = (contents[i - 2])
d = 0 # Medium or Low Content
p = 0 # -15 yes or no
NumberOfTestsPerNorm = []
if (contents[i - 1].startswith('Low') == 1):
d = 0
continue
if (contents[i - 1] == '\n'):
continue
if (contents[i - 1].startswith('Medium')):
d = 1
else:
d = 0
if lines.startswith('Prated'):
prated_low = contents[i][:-4]
if (contents[i + 2].endswith('kW\n')):
prated_medium = contents[i + 2][:-4]
elif (lines.startswith('Pdh Tj = -15°C') == 1): # check
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
minusfifteen_low = contents[i]
P_th.append(minusfifteen_low[:-4])
T_in.append('-15')
                            if d == 0: # first low then medium temperature
if (climate == 'average'):
T_out.append('35')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('55')
elif (climate == 'cold'):
T_out.append('49')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_low)
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Guideline.append(guideline[:-1])
Climate.append(climate)
Type.append(heatpumpType)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
minusfifteen_medium = contents[i + 2]
P_th.append(minusfifteen_medium[:-4])
T_in.append('-15')
if (climate == 'average'):
T_out.append('55')
elif (climate == 'cold'):
T_out.append('49')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_medium)
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = -15°C')):
if (contents[i] == '\n'):
continue
if (contents[i].startswith('EHPA')):
continue
COP.append(contents[i][:-1])
NumberOfTestsPerModule.append(i)
p = 1
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerModule.append(i)
p = 2
elif (lines.startswith('Pdh Tj = -7°C') == 1): # check
minusseven_low = contents[i]
P_th.append(minusseven_low[:-4])
T_in.append('-7')
                    if d == 0: # first low then medium temperature
if (climate == 'average'):
T_out.append('34')
elif (climate == 'cold'):
T_out.append('30')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('52')
elif (climate == 'cold'):
T_out.append('44')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
Mass.append(mass[:-4])
Prated.append(prated_low)
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP') == 1):
continue
else:
minusseven_medium = contents[i + 2]
P_th.append(minusseven_medium[:-4])
T_in.append('-7')
if (climate == 'average'):
T_out.append('52')
elif (climate == 'cold'):
T_out.append('44')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = -7°C')):
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = +2°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
plustwo_low = contents[i]
P_th.append(plustwo_low[:-4])
T_in.append('2')
                            if d == 0: # first low then medium temperature
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('27')
elif (climate == 'warm'):
T_out.append('35')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('42')
elif (climate == 'cold'):
T_out.append('37')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plustwo_medium = contents[i + 2]
# if(plustwo_low[:-1].endswith('kW')==0):#test
# print(plustwo_low[:-1])
# if(plustwo_medium[:-1].endswith('kW')==0):#test
# print(file.name)#plustwo_medium[:-1]
P_th.append(plustwo_medium[:-4])
T_in.append('2')
if (climate == 'average'):
T_out.append('42')
elif (climate == 'cold'):
T_out.append('37')
elif (climate == 'warm'):
T_out.append('55')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = +2°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
print(file.name)
continue
                        if (contents[i + 2].startswith('Warmer')): # useless info
continue
                        if (contents[i] == 'n/a\n'): # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = +7°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
else:
plusseven_low = contents[i]
P_th.append(plusseven_low[:-4])
T_in.append('7')
                            if d == 0: # first low then medium temperature
if (climate == 'average'):
T_out.append('27')
elif (climate == 'cold'):
T_out.append('25')
elif (climate == 'warm'):
T_out.append('31')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('36')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('46')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plusseven_medium = contents[i + 2]
P_th.append(plusseven_medium[:-4])
T_in.append('7')
if (climate == 'average'):
T_out.append('36')
elif (climate == 'cold'):
T_out.append('32')
elif (climate == 'warm'):
T_out.append('46')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = +7°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
continue
                        if (contents[i + 2].startswith('Warmer')): # useless info
continue
                        if (contents[i] == 'n/a\n'): # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Pdh Tj = 12°C') == 1):
if (contents[i].endswith('Cdh\n') == 1): # wrong content
continue
if (contents[i] == '\n'): # no content
continue
if (contents[i].startswith('EHPA Secretariat') == 1):
plustwelfe_low = (contents[i - 11])
P_th.append(plustwelfe_low[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('24')
elif (climate == 'cold'):
T_out.append('24')
elif (climate == 'warm'):
T_out.append('26')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
# SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_low)
# SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
plustwelfe_medium = (contents[i - 9])
P_th.append(plustwelfe_medium[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
# SPLoutdoor.append(sploutdoor_low)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
else:
plustwelfe_low = contents[i]
P_th.append(plustwelfe_low[:-4])
T_in.append('12')
                            if d == 0: # first low then medium temperature
if (climate == 'average'):
T_out.append('24')
elif (climate == 'cold'):
T_out.append('24')
elif (climate == 'warm'):
T_out.append('26')
if d == 1: # first medium Temperature
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
SPLindoor.append(splindoor_low)
SPLoutdoor.append(sploutdoor_low)
Mass.append(mass[:-4])
Prated.append(prated_low)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
if (contents[i + 2].startswith('COP')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('Disclaimer')): # for PDF without medium heat
continue
if (contents[i + 2].startswith('EHPA')): # End of page
if (contents[i + 8].startswith('COP')): # end of page plus no medium heat
continue
plustwelfe_medium = contents[i + 2]
P_th.append(plustwelfe_medium[:-4])
T_in.append('12')
if (climate == 'average'):
T_out.append('30')
elif (climate == 'cold'):
T_out.append('28')
elif (climate == 'warm'):
T_out.append('34')
Modul.append(modul[7:-1])
Manufacturer.append(manufacturer[:-1])
Date.append(date[:-1])
Refrigerant.append(refrigerant[:-1])
# SPLindoor.append(splindoor_low)
SPLindoor.append(splindoor_medium)
SPLoutdoor.append(sploutdoor_medium)
Mass.append(mass[:-4])
Prated.append(prated_medium)
Type.append(heatpumpType)
Guideline.append(guideline[:-1])
Climate.append(climate)
elif (lines.startswith('COP Tj = 12°C')): # check
if (contents[i] == '\n'): # no infos
continue
if (contents[i].startswith('EHPA')): # end of page
print('W')
continue
                        if (contents[i + 2].startswith('Warmer')): # useless info
continue
                        if (contents[i] == 'n/a\n'): # useless info
continue
COP.append(contents[i][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
if (contents[i + 2].startswith('Pdh')): # no medium Climate
continue
if (contents[i + 2].startswith('Cdh')): # no medium Climate
continue
if (contents[i + 2].startswith('EHPA')): # no medium Climate
continue
COP.append(contents[i + 2][:-1])
NumberOfTestsPerNorm.append(i)
NumberOfTestsPerModule.append(i)
elif (lines.startswith('Poff')):
l = 0 # l shows if Poff Medium is different to Poff Low Temperature
c = 2 # c is just an iterator to print every second Poff
poff = contents[i][:-2]
if poff.endswith(' '):
poff = poff[:-1]
if poff.endswith('.00'):
poff = poff[:-3]
second_poff = contents[i + 2][:-2]
if second_poff.endswith(' '):
second_poff = second_poff[:-1]
if second_poff.endswith('.00'):
second_poff = second_poff[:-3]
if (poff != second_poff): # see if Poff Medium to Poff low
if (contents[i + 2].endswith('W\n')):
if (contents[i + 2] != 'W\n'):
l = 1
for Tests in NumberOfTestsPerNorm:
if l == 0:
Poff.append(poff)
if l == 1:
c += 1
if c % 2 == 1:
Poff.append(poff)
if c % 2 == 0:
Poff.append(second_poff)
elif (lines.startswith('PSB')):
l = 0 # l shows if Poff Medium is different to Poff Low Temperature
c = 2 # c is just an iterator to print every second Poff
psb = contents[i][:-2]
if psb.endswith(' '):
psb = psb[:-1]
if psb.endswith('.00'):
psb = psb[:-3]
psb_medium = contents[i + 2][:-2]
if psb_medium.endswith(' '):
psb_medium = psb_medium[:-1]
if psb_medium.endswith('.00'):
psb_medium = psb_medium[:-3]
                        if (psb != psb_medium): # check whether Psb medium differs from Psb low
if (contents[i + 2].endswith('W\n')):
if (contents[i + 2] != 'W\n'):
l = 1
for Tests in NumberOfTestsPerNorm:
if l == 0:
Psb.append(psb)
if l == 1:
c += 1
if c % 2 == 1:
Psb.append(psb)
if c % 2 == 0:
Psb.append(psb_medium)
if p == 1:
Poff.append(poff)
Psb.append(psb)
if p == 2:
Poff.append(poff)
Poff.append(second_poff)
Psb.append(psb)
Psb.append(psb_medium)
df['Manufacturer'] = Manufacturer
df['Model'] = Modul
df['Date'] = Date
    df['Date'] = pd.to_datetime(df['Date'], format='%d.%m.%Y')
import pandas as pd
# FIXME: Not realistic, as one would typically want to adjust positions every day
def trade_summary(df_input, security_name, position_name):
"""
For static positions only, i.e. at any time a fixed unit of positions are live.
i.e. if on day 0, 100 unit is bought, the unit will be kept at 100 throughout live signal
Take a dataframe with timestamp index, security, and security_pos (position) and calculate PnL trade by trade.
"""
df = df_input.copy()
df["long_short"] = (df[position_name] > 0) * 1 - (df[position_name] < 0) * 1
trade_detail = []
def update_trade(_trade_count, _position, _open_date, _open_price, _close_date, _close_price):
trade_detail.append({"trade": _trade_count,
"position": _position,
"open_date": _open_date,
"open_price": _open_price,
"close_date": _close_date,
"close_price": _close_price,
"realized_pnl": _position * (_close_price - open_price)})
trade_count = 0
long_short = 0
for i, data_slice in enumerate(df.iterrows()):
s = data_slice[1] # Slice
if i > 0 and s.long_short != df.iloc[i - 1].long_short:
if long_short != 0:
close_price, close_date = s[security_name], s.name
update_trade(trade_count, position, open_date, open_price, close_date, close_price)
long_short = 0
if s.long_short != 0:
open_price = s[security_name]
position = s[position_name]
open_date = s.name # date/time from index
trade_count += 1
long_short = s.long_short
if s.long_short != long_short:
close_price, close_date = s[security_name], s.name
close_date = s.name
update_trade(trade_count, position, open_date, open_price, close_date, close_price)
trade_summary_df = pd.DataFrame(trade_detail)
# Merge realized PnL onto original time_series. TODO: Can consider returning only one single series
trade_time_series = trade_summary_df[["close_date", "realized_pnl"]]
trade_time_series = trade_time_series.set_index("close_date")
trade_time_series.index.name = df_input.index.name
# TODO: AMEND DATETIME FORMAT WHEN NECESSARY
trade_time_series.index = pd.to_datetime(trade_time_series.index, format="%d/%m/%Y")
    trade_time_series = pd.concat([trade_time_series, df], axis=1)
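
# Hedged usage sketch (not part of the original module): the input frame needs a
# timestamp-like index, a price column and a position column; the column names and
# values below are illustrative only, and the demo starts flat so the first row does
# not trigger a close before any trade has been opened.
def _trade_summary_demo():
    dates = pd.date_range("2021-01-01", periods=6, freq="D").strftime("%d/%m/%Y")
    demo = pd.DataFrame(
        {"close": [100.0, 101.0, 103.0, 102.0, 105.0, 104.0],
         "close_pos": [0, 100, 100, 0, -100, 0]},
        index=dates,
    )
    demo.index.name = "date"
    return trade_summary(demo, "close", "close_pos")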
# ! /usr/bin/env python3
# -*- coding: utf-8 -*-
# AUTHOR: <NAME>, EMBL
# converter.py
# For further usage, see the manual supplied together with CLEMSite
import numpy as np
from sklearn.manifold import TSNE
import pylab as Plot
import pandas as pd
import glob
from os import listdir
from os.path import isfile, join
import os, shutil
import seaborn as sns
from tqdm import tqdm
import matplotlib.patches as mpatches
import holoviews as hv
from holoviews import streams
hv.extension('bokeh')
import cv2
import time
from skimage import data, exposure, img_as_float
############################################## LOADING DATA ############################################
### Set here treatment names
_treatments = ["COPB2", "WDR75", "DNM1", "COPG1", "C1S", "DENND4C", "IPO8", "SRSF1", "Neg9", "FAM177B", "ACTR3",
"PTBP1", "DNM1", "NT5C", "PTBP1", "ARHGAP44", "Neg9", "ACTR3", "SRSF1", "C1S", "IPO8", "WDR75", "NT5C",
"FAM177B", "COPB1", "ARHGAP44", "Neg9", "GPT", "KIF11", "GPT", "DENND4C", "AURKB"]
_features = ['Metadata_BaseFileName', 'FileName_ColorImage', 'Location_Center_X', 'Location_Center_Y',
'Mean_Golgi_AreaShape_Center_X', 'Mean_Golgi_AreaShape_Center_Y', 'Mean_Nuclei_AreaShape_Solidity',
'Metadata_U', 'Metadata_V', 'Metadata_X', 'Metadata_Y', 'ImageQuality_PowerLogLogSlope_Dna',
'Intensity_IntegratedIntensity_GolgiBGCorr', 'Mean_Nuclei_Math_CV', 'Math_Blobness', 'Math_Diffuseness',
'Children_Golgi_Count', 'Mean_MaxAreaGolgi_AreaShape_FormFactor']
treatment_column = 'Gene'
treatment_index = 'Metadata_Y'
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
########################################################################################################
def loadData(regenerate_crops=True, no_treatments=False, subfolder=None):
"""
    regenerate_crops = rewrite the crop images. Set to False to save computation time when reloading data from a previous selection.
    no_treatments = set to True when there are no treatments and there is a full population of cells.
    subfolder = optional subfolder of the current working directory to load the data from.
"""
data_folder = os.getcwd()
if subfolder:
data_folder = os.path.join(data_folder, subfolder)
df = pd.DataFrame()
dft = getDataTables(data_folder, not no_treatments)
dft = cropImages(data_folder, dft, subfolder =subfolder, regenerate_crops=True)
df = pd.concat([df, dft], ignore_index=True)
print("-- DATA on " + data_folder + " LOADED --")
return df
def getDataTables(dirSample, treatment=True):
"""
    dirSample : directory where the CellProfiler output is stored. It must contain a --cp folder, itself subdivided into 'tables' and 'images' subfolders.
    treatment : whether to split the data by treatment. Follows from the loadData method.
"""
flist = glob.glob(dirSample + "\\*--cp*")
flistdir = [f for f in flist if os.path.isdir(f)]
if len(flistdir) == 0:
raise Exception('--cp folder not found in :' + dirSample)
flist2 = glob.glob(flistdir[0] + "\\*tables*")
flistdir2 = [f for f in flist2 if os.path.isdir(f)]
if flistdir2 is None:
raise Exception('Tables folder not found :' + dirSample)
return getData(flistdir2[0], treatment)
def getData(mainfolder, treatment=True):
all_data = all_data_in_one_table(mainfolder)
all_data = all_data[_features]
all_data = all_data.dropna()
if treatment:
for ind, el in enumerate(_treatments):
all_data.loc[all_data[treatment_index] == ind, treatment_column] = el
else:
all_data[treatment_column] = 'UMO' # Unknown
# Remove original columns
return all_data
def cropImages(data_sample, dft, subfolder = None, spacing=50, regenerate_crops=True, color=(25, 255, 255)):
"""
Searches for a folder with --cp and adds the address of the crop to the dataframe.
"""
# For every row, find the image
flist = glob.glob(data_sample + "\\*--cp*")
flistdir = [f for f in flist if os.path.isdir(f)]
if len(flistdir) == 0:
raise Exception('--cp folder not found in :' + data_sample)
flist2 = glob.glob(flistdir[0] + "\\*images*")
flistimages = [f for f in flist2 if os.path.isdir(f)]
if flistimages is None:
raise Exception('Images folder not found :' + data_sample)
# Create a folder for crops
dir_to_save = data_sample + "\\crops"
try:
os.makedirs(dir_to_save)
except FileExistsError:
if (regenerate_crops):
shutil.rmtree(dir_to_save)
try:
# Try to recreate
os.makedirs(dir_to_save, exist_ok=True)
except OSError as e:
# If problem is that directory still exists, wait a bit and try again
if e.winerror == 183:
time.sleep(0.01)
else:
raise
dft["img_path"] = ""
dft["img_name_raw"] = ""
dft = dft.reset_index(drop=True)
for row in tqdm(dft.itertuples()):
ind = row.Index
imname = dft.at[ind, 'FileName_ColorImage']
image_name = os.path.join(flistimages[0],str(imname))
crop_name = imname[:-4] + "_" + str(ind) + ".png"
# Load image
if os.path.exists(image_name):
if regenerate_crops:
img = cv2.imread(image_name)
img = cv2.normalize(img, img, 30, 250, cv2.NORM_MINMAX) # autoBC(img)
x = dft.at[ind, 'Location_Center_X']
y = dft.at[ind, 'Location_Center_Y']
# Crop around center of object (90 by 90 pixels, is more than enough)
xmin = int(np.max([0, x - spacing]))
xmax = int(np.min([img.shape[0] - 1, x + spacing]))
ymin = int(np.max([0, y - spacing]))
ymax = int(np.min([img.shape[1] - 1, y + spacing]))
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)
cv2.imwrite(os.path.join(dir_to_save,crop_name), img)
dir_im = os.path.join(dir_to_save,crop_name)
dft.at[ind, "img_path"] = dir_im
if subfolder is not None:
dft.at[ind, "img_name_raw"] = "."+ os.path.sep + subfolder+ os.path.sep + "crops"+ os.path.sep + crop_name
else:
dft.at[ind,"img_name_raw"] = "."+os.path.sep + "crops" + os.path.sep + crop_name
else:
print("NO-" + image_name)
print(str(ind))
return dft
def findTables(foldname, reg_expr):
tables_list = []
dirs = []
dirs.append(foldname)
while dirs:
fname = dirs.pop()
fname_t = str(fname) + "\\" + reg_expr
flist = glob.glob(fname_t)
if not flist:
newdirs = ([f for f in glob.glob(fname + '\\*') if os.path.isdir(f)])
for el in newdirs:
dirs.append(el)
else:
for el in flist:
tables_list.append(el)
return tables_list
def all_data_in_one_table(folder_tables):
tables_list = findTables(folder_tables, '*objects.csv*')
tables_list_images = findTables(folder_tables, '*images.csv*')
data_frames = []
# Only one file containing everything
if len(tables_list_images) == 1 and len(tables_list) == 1:
with open(tables_list[0], 'r') as myfile:
data = myfile.read().replace('\"', '')
with open(tables_list[0], 'w') as myfile:
myfile.write(data)
with open(tables_list_images[0], 'r') as myfile:
data = myfile.read().replace('\"', '')
with open(tables_list_images[0], 'w') as myfile:
myfile.write(data)
table = pd.read_csv(tables_list[0], ',')
images = pd.read_csv(tables_list_images[0], ',')
# merged_df = table.join(images,on='ImageNumber',rsuffix='_other') #ImageNumber
merged_df = pd.merge(left=table, right=images, on=(
'ImageNumber', 'Metadata_BaseFileName', 'Metadata_U', 'Metadata_V', 'Metadata_X', 'Metadata_Y'))
else:
# For each file in folder tables select objects
# compile the list of dataframes you want to merge
tables_list = sorted(tables_list, key=str.lower)
tables_list_images = sorted(tables_list_images, key=str.lower)
for table_file, image_file in zip(tables_list, tables_list_images):
with open(table_file, 'r') as myfile:
data = myfile.read().replace('\"', '')
with open(table_file, 'w') as myfile:
myfile.write(data)
with open(image_file, 'r') as myfile:
data = myfile.read().replace('\"', '')
with open(image_file, 'w') as myfile:
myfile.write(data)
table = pd.read_csv(table_file, ',')
images = pd.read_csv(image_file, ',')
# merge in big table
ftable = table.merge(images)
data_frames.append(ftable)
merged_df = pd.concat(data_frames)
return merged_df
####################################### PLOTTING ###############################################################
from bokeh.layouts import layout
import bokeh
from bokeh.io import curdoc
def grouped(iterable, n):
"s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."
return zip(*[iter(iterable)] * n)
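# e.g. list(grouped([1, 2, 3, 4, 5, 6], 2)) -> [(1, 2), (3, 4), (5, 6)]; any trailing
# remainder shorter than n is silently dropped by zip.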
def plotQC(df, value, title, size=10, jitter=0.35, factor_reduce=0.5):
df_i = df.dropna(subset=[value])
df_i = df_i.reset_index(drop=True)
key_dimensions = [(value, title)]
value_dimensions = [('Gene', 'Gene'), ('Metadata_X', 'Position')]
macro = hv.Table(df_i, key_dimensions, value_dimensions)
options = dict(color_index='Position', legend_position='left', jitter=jitter, width=1000, height=600,
scaling_method='width', scaling_factor=2, size_index=2, show_grid=True,
tools=['hover', 'box_select', 'lasso_select'], line_color='k', cmap='Category20', size=size,
nonselection_color='lightskyblue')
quality_scatter = macro.to.scatter('Gene', [title]).options(**options)
sel = streams.Selection1D(source=quality_scatter)
image_name = df_i.loc[0, "img_name_raw"]
img = cv2.imread(image_name, 0)
h, w = img.shape
w = int(factor_reduce * w)
h = int(factor_reduce * h)
pad = int(2.2 * w)
def selection_callback(index):
if not index:
return hv.Div("")
divtext = f'<table width={pad} border=1 cellpadding=10 align=center valign=center>'
for i, j in grouped(index, 2):
value_s = '{:f}'.format(df_i[value][i])
value_s2 = '{:f}'.format(df_i[value][j])
divtext += '<tr>'
divtext += f'<td align=center valign=center><br> {i} Value: {value_s}</br></td>' + "\n"
divtext += f'<td align=center valign=center><br> {j} Value: {value_s2}</br></td>' + "\n"
divtext += '</tr><tr>'
divtext += f'<td align=center valign=center><img src={df_i.loc[i, "img_name_raw"]} width={w} height={h}></td>'
divtext += f'<td align=center valign=center><img src={df_i.loc[j, "img_name_raw"]} width={w} height={h}></td>'
divtext += '</tr>'
if len(index) % 2 == 1:
value_s = '{:f}'.format(df_i[value][index[-1]])
divtext += '<tr>'
divtext += f'<td align=center valign=center><br> {index[-1]} Value: {value_s}</br></td>' + "\n"
divtext += f'<td align=center valign=center><br> </br></td>' + "\n"
divtext += '</tr><tr>'
divtext += f'<td align=center valign=center><img src={df_i.loc[index[-1], "img_name_raw"]} width={w} height={h}></td>'
divtext += f'<td align=center valign=center></td>'
divtext += '</tr>'
divtext += '</table>'
return hv.Div(str(divtext))
div = hv.DynamicMap(selection_callback, streams=[sel])
hv.streams.PlotReset(source=quality_scatter, subscribers=[lambda reset: sel.event(index=[])])
return hv.Layout(quality_scatter + div).cols(1), sel
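# Hedged usage sketch (feature name taken from _features above; rendering assumes a
# Jupyter/holoviews context, so this is illustrative rather than prescriptive):
#   layout, selection = plotQC(df, 'Math_Blobness', 'Golgi blobness')
#   layout  # box/lasso-select points to inspect the linked image crops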
def applyQC(idf, column, vmin=None, vmax=None):
print("QC for " + str(column))
if vmin is None:
vmin = -np.inf
if vmax is None:
vmax = np.inf
print("Values applied :[" + str(vmin) + "," + str(vmax) + "].")
print("Original number of cells:" + str(len(idf)))
idf = idf.dropna(subset=[column])
idf = idf[idf[column] > vmin]
idf = idf[idf[column] < vmax]
print("After applying control " + column + " :" + str(len(idf)))
print("---QC " + str(column) + " done.")
idf = idf.reset_index(drop=True)
return idf
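# Hedged usage sketch (thresholds are illustrative only, not recommended defaults):
#   df = applyQC(df, 'ImageQuality_PowerLogLogSlope_Dna', vmin=-2.5, vmax=-1.0)
#   df = applyQC(df, 'Mean_Nuclei_AreaShape_Solidity', vmin=0.9)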
def getZscore(all_data, hue=None, column=None, remove_outliers=True):
"""
:param all_data: dataframe
:param hue: if hue is not given, each column normalizes against itself : value - mean / std
if hue is given, then data is grouped by hue and normalized by group : value - mean(of its group)/std(of its group)
:param column: if column is given (hue must be given too), then the data is grouped by hue, and then grouped again by column.
For example we might want to group data first by study, and then by gene.
:param remove_outliers: If True, data passing 4 times std is removed. We decided to use 4 only to remove outliers that
are potential numerical errors, not from the standard distribution of the data.
:return:
"""
z_score = all_data
if hue is not None:
final_df = pd.DataFrame()
m_indexes = list(all_data[hue].unique())
for el in m_indexes:
data_calc = all_data.loc[all_data[hue] == el, :].copy()
if (column is not None):
m_indexes2 = list(data_calc[column].unique())
for el2 in m_indexes2:
data_calc2 = data_calc.loc[data_calc[column] == el2, :].copy()
calc_d = data_calc2.select_dtypes(include=numerics)
for col in calc_d:
if col != hue and col != column:
col_zscore = col + '_zscore'
data_calc2.loc[:, col_zscore] = (calc_d[col] - calc_d[col].mean()) / calc_d[col].std(ddof=0)
if remove_outliers:
data_calc2 = data_calc2[data_calc2[col_zscore] < 4 * data_calc2[col_zscore].std()]
final_df = pd.concat([final_df, data_calc2])
else:
calc_d = data_calc.select_dtypes(include=numerics)
for col in calc_d:
if col != hue:
col_zscore = col + '_zscore'
data_calc.loc[:, col_zscore] = (calc_d[col] - calc_d[col].mean()) / calc_d[col].std(ddof=0)
if remove_outliers:
data_calc = data_calc[data_calc[col_zscore] < 4 * data_calc[col_zscore].std()]
final_df = pd.concat([final_df, data_calc])
else:
for col in all_data:
col_zscore = col + '_zscore'
z_score[col_zscore] = (z_score[col] - z_score[col].mean()) / z_score[col].std(ddof=0)
final_df = z_score
return final_df
def getZscoreAgainstControl(all_data, hue, control, remove_outliers=True, drop_index=True):
"""
:param all_data: dataframe
:param remove_outliers: If True, data passing 4 times std is removed. We decided to use 4 only to remove outliers that
are potential numerical errors, not from the standard distribution of the data.
:return:
"""
final_df = pd.DataFrame()
m_indexes = list(all_data[hue].unique().astype('str'))
query_one = ""
for el in control:
if el in m_indexes:
query_one = query_one + hue + "==\'" + str(el) + "\'|"
else:
return
query_one = query_one[:-1] # remove last or
df_q = all_data.query(query_one).copy()
if remove_outliers:
for col in all_data.select_dtypes(include=numerics):
df_q = df_q[np.abs(df_q[col] - df_q[col].mean()) < np.abs(3 * df_q[col].std())]
if drop_index:
df_q = df_q.reset_index(drop=True)
all_data = df_q.copy()
eps = 1e-15
for el in m_indexes:
data_calc = all_data.query(hue + "==\'" + str(el) + "\'").copy()
data_calc = data_calc.reset_index(drop=drop_index)
for col in data_calc.select_dtypes(include=numerics):
if col != hue and col != 'index':
col_zscore = col + '_zscore'
data_calc[col_zscore] = (data_calc[col] - df_q[col].mean()) / (df_q[col].std(ddof=0) + eps)
if remove_outliers:
data_calc = data_calc[np.abs(data_calc[col_zscore]) < np.abs(
data_calc[col_zscore].mean() + 3 * data_calc[col_zscore].std())]
final_df = pd.concat([final_df, data_calc])
final_df = final_df.reset_index(drop=drop_index)
return final_df
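# Hedged usage sketch: z-score every numeric feature against the negative-control
# wells ('Neg9' is the negative-control label listed in _treatments above):
#   z_df = getZscoreAgainstControl(df, hue='Gene', control=['Neg9'])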
def getZscoreAgainstControlPerColumn(all_data, hue, control, column):
"""
:param all_data: dataframe
:param remove_outliers: If True, data passing 4 times std is removed. We decided to use 4 only to remove outliers that
are potential numerical errors, not from the standard distribution of the data.
:return:
"""
final_df = pd.DataFrame()
m_indexes = list(all_data[hue].unique().astype('str'))
query_one = ""
for el in control:
if el in m_indexes:
query_one = query_one + hue + "==\'" + str(el) + "\'|"
else:
return
query_one = query_one[:-1] # remove last or
df_q = all_data.query(query_one).copy()
eps = 1e-15
if hue is not None:
for el in m_indexes:
data_calc = all_data.query(hue + "==\'" + str(el) + "\'").copy()
data_calc = data_calc.reset_index(drop=True)
if (column is not None):
m_indexes2 = list(data_calc[column].unique())
for el2 in m_indexes2:
data_calc2 = data_calc.loc[data_calc[column] == el2, :].copy()
data_calc2 = data_calc2.reset_index(drop=True)
calc_d = data_calc2.select_dtypes(include=numerics)
for col in calc_d:
if col != hue and col != column:
col_zscore = col + '_zscore'
df_q2 = df_q.query(column + "==\'" + str(el2) + "\'").copy()
data_calc2.loc[:, col_zscore] = (calc_d[col] - df_q2[col].mean()) / (
df_q2[col].std(ddof=0) + eps)
                    final_df = pd.concat([final_df, data_calc2])
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
# TODO: could also box idx?
idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq)
exp = np.array([False, False, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == idx, exp)
exp = np.array([True, True, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != idx, exp)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > idx, exp)
exp = np.array([True, False, False, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < idx, exp)
exp = np.array([False, True, True, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= idx, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= idx, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi_mismatched_freq_raises(self, freq, box_with_array):
# GH#13200
# different base freq
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
msg = "Input has different freq=A-DEC from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="A")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="A") >= base
# TODO: Could parametrize over boxes for idx?
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="A")
rev_msg = (
r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=A-DEC\)"
)
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
# Different frequency
msg = "Input has different freq=4M from "
with pytest.raises(IncompatibleFrequency, match=msg):
base <= Period("2011", freq="4M")
with pytest.raises(IncompatibleFrequency, match=msg):
Period("2011", freq="4M") >= base
idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M")
rev_msg = r"Input has different freq=(M|2M|3M) from " r"PeriodArray\(freq=4M\)"
idx_msg = rev_msg if box_with_array is tm.to_array else msg
with pytest.raises(IncompatibleFrequency, match=idx_msg):
base <= idx
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
result = idx1 > Period("2011-02", freq=freq)
exp = np.array([False, False, False, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("2011-02", freq=freq) < idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 == Period("NaT", freq=freq)
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) == idx1
tm.assert_numpy_array_equal(result, exp)
result = idx1 != Period("NaT", freq=freq)
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = Period("NaT", freq=freq) != idx1
tm.assert_numpy_array_equal(result, exp)
idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq)
result = idx1 < idx2
exp = np.array([True, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx2
exp = np.array([False, False, False, False])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx2
exp = np.array([True, True, True, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 == idx1
exp = np.array([True, True, False, True])
tm.assert_numpy_array_equal(result, exp)
result = idx1 != idx1
exp = np.array([False, False, True, False])
tm.assert_numpy_array_equal(result, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_pi_cmp_nat_mismatched_freq_raises(self, freq):
idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq)
diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M")
msg = "Input has different freq=4M from Period(Array|Index)"
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 > diff
with pytest.raises(IncompatibleFrequency, match=msg):
idx1 == diff
# TODO: De-duplicate with test_pi_cmp_nat
@pytest.mark.parametrize("dtype", [object, None])
def test_comp_nat(self, dtype):
left = pd.PeriodIndex(
[pd.Period("2011-01-01"), pd.NaT, pd.Period("2011-01-03")]
)
right = pd.PeriodIndex([pd.NaT, pd.NaT, pd.Period("2011-01-03")])
if dtype is not None:
left = left.astype(dtype)
right = right.astype(dtype)
result = left == right
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = left != right
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == right, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(left != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != left, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(left < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > left, expected)
class TestPeriodSeriesComparisons:
def test_cmp_series_period_series_mixed_freq(self):
# GH#13200
base = Series(
[
Period("2011", freq="A"),
Period("2011-02", freq="M"),
Period("2013", freq="A"),
Period("2011-04", freq="M"),
]
)
ser = Series(
[
Period("2012", freq="A"),
Period("2011-01", freq="M"),
Period("2013", freq="A"),
Period("2011-05", freq="M"),
]
)
exp = Series([False, False, True, False])
tm.assert_series_equal(base == ser, exp)
exp = Series([True, True, False, True])
tm.assert_series_equal(base != ser, exp)
exp = Series([False, True, False, False])
tm.assert_series_equal(base > ser, exp)
exp = Series([True, False, False, True])
tm.assert_series_equal(base < ser, exp)
exp = Series([False, True, True, False])
tm.assert_series_equal(base >= ser, exp)
exp = Series([True, False, True, True])
tm.assert_series_equal(base <= ser, exp)
class TestPeriodIndexSeriesComparisonConsistency:
""" Test PeriodIndex and Period Series Ops consistency """
# TODO: needs parametrization+de-duplication
def _check(self, values, func, expected):
# Test PeriodIndex and Period Series Ops consistency
idx = pd.PeriodIndex(values)
result = func(idx)
# check that we don't pass an unwanted type to tm.assert_equal
assert isinstance(expected, (pd.Index, np.ndarray))
tm.assert_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_comp_period(self):
idx = PeriodIndex(
["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.Period("2011-03", freq="M")
exp = np.array([False, False, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, True, True, False], dtype=np.bool)
self._check(idx, f, exp)
def test_pi_comp_period_nat(self):
idx = PeriodIndex(
["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
)
f = lambda x: x == pd.Period("2011-03", freq="M")
exp = np.array([False, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") == x
self._check(idx, f, exp)
f = lambda x: x == pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT == x
self._check(idx, f, exp)
f = lambda x: x != pd.Period("2011-03", freq="M")
exp = np.array([True, True, False, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") != x
self._check(idx, f, exp)
f = lambda x: x != pd.NaT
exp = np.array([True, True, True, True], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT != x
self._check(idx, f, exp)
f = lambda x: pd.Period("2011-03", freq="M") >= x
exp = np.array([True, False, True, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x < pd.Period("2011-03", freq="M")
exp = np.array([True, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: x > pd.NaT
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
f = lambda x: pd.NaT >= x
exp = np.array([False, False, False, False], dtype=np.bool)
self._check(idx, f, exp)
# ------------------------------------------------------------------
# Arithmetic
class TestPeriodFrameArithmetic:
def test_ops_frame_period(self):
# GH#13043
df = pd.DataFrame(
{
"A": [pd.Period("2015-01", freq="M"), pd.Period("2015-02", freq="M")],
"B": [pd.Period("2014-01", freq="M"), pd.Period("2014-02", freq="M")],
}
)
assert df["A"].dtype == "Period[M]"
assert df["B"].dtype == "Period[M]"
p = pd.Period("2015-03", freq="M")
off = p.freq
# dtype will be object because of original dtype
exp = pd.DataFrame(
{
"A": np.array([2 * off, 1 * off], dtype=object),
"B": np.array([14 * off, 13 * off], dtype=object),
}
)
tm.assert_frame_equal(p - df, exp)
tm.assert_frame_equal(df - p, -1 * exp)
df2 = pd.DataFrame(
{
"A": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
"B": [pd.Period("2015-05", freq="M"), pd.Period("2015-06", freq="M")],
}
)
assert df2["A"].dtype == "Period[M]"
assert df2["B"].dtype == "Period[M]"
exp = pd.DataFrame(
{
"A": np.array([4 * off, 4 * off], dtype=object),
"B": np.array([16 * off, 16 * off], dtype=object),
}
)
tm.assert_frame_equal(df2 - df, exp)
tm.assert_frame_equal(df - df2, -1 * exp)
class TestPeriodIndexArithmetic:
# ---------------------------------------------------------------
# __add__/__sub__ with PeriodIndex
# PeriodIndex + other is defined for integers and timedelta-like others
# PeriodIndex - other is defined for integers, timedelta-like others,
# and PeriodIndex (with matching freq)
def test_parr_add_iadd_parr_raises(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
# An earlier implementation of PeriodIndex addition performed
# a set operation (union). This has since been changed to
# raise a TypeError. See GH#14164 and GH#13077 for historical
# reference.
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
def test_pi_sub_isub_pi(self):
# GH#20049
# For historical reference see GH#14164, GH#13077.
# PeriodIndex subtraction originally performed set difference,
# then changed to raise TypeError before being implemented in GH#20049
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="D", periods=5)
off = rng.freq
expected = pd.Index([-5 * off] * 5)
result = rng - other
tm.assert_index_equal(result, expected)
rng -= other
tm.assert_index_equal(rng, expected)
def test_pi_sub_pi_with_nat(self):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = rng[1:].insert(0, pd.NaT)
assert other[1:].equals(rng[1:])
result = rng - other
off = rng.freq
expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
tm.assert_index_equal(result, expected)
def test_parr_sub_pi_mismatched_freq(self, box_with_array):
rng = pd.period_range("1/1/2000", freq="D", periods=5)
other = pd.period_range("1/6/2000", freq="H", periods=5)
# TODO: parametrize over boxes for other?
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(IncompatibleFrequency):
rng - other
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1_d = "19910905"
p2_d = "19920406"
p1 = pd.PeriodIndex([p1_d], freq=tick_classes(n))
p2 = pd.PeriodIndex([p2_d], freq=tick_classes(n))
expected = pd.PeriodIndex([p2_d], freq=p2.freq.base) - pd.PeriodIndex(
[p1_d], freq=p1.freq.base
)
tm.assert_index_equal((p2 - p1), expected)
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(pd.offsets.YearEnd, "month"),
(pd.offsets.QuarterEnd, "startingMonth"),
(pd.offsets.MonthEnd, None),
(pd.offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
freq = offset(n, normalize=False, **kwds)
p1 = pd.PeriodIndex([p1_d], freq=freq)
p2 = pd.PeriodIndex([p2_d], freq=freq)
result = p2 - p1
expected = pd.PeriodIndex([p2_d], freq=freq.base) - pd.PeriodIndex(
[p1_d], freq=freq.base
)
tm.assert_index_equal(result, expected)
# -------------------------------------------------------------
# Invalid Operations
@pytest.mark.parametrize("other", [3.14, np.array([2.0, 3.0])])
@pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub])
def test_parr_add_sub_float_raises(self, op, other, box_with_array):
dti = pd.DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")
pi = dti.to_period("D")
pi = tm.box_expected(pi, box_with_array)
with pytest.raises(TypeError):
op(pi, other)
@pytest.mark.parametrize(
"other",
[
# datetime scalars
pd.Timestamp.now(),
pd.Timestamp.now().to_pydatetime(),
pd.Timestamp.now().to_datetime64(),
# datetime-like arrays
pd.date_range("2016-01-01", periods=3, freq="H"),
| pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels") | pandas.date_range |
# -*- coding: utf-8 -*-
"""
Consensus non-negative matrix factorization (cNMF), adapted from Kotliar et al. (2019)
"""
import numpy as np
import pandas as pd
import os, errno
import glob
import shutil
import datetime
import uuid
import itertools
import yaml
import subprocess
import scipy.sparse as sp
import warnings
from scipy.spatial.distance import squareform
from sklearn.decomposition import non_negative_factorization
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.utils import sparsefuncs
from sklearn.preprocessing import normalize
from fastcluster import linkage
from scipy.cluster.hierarchy import leaves_list
import matplotlib.pyplot as plt
import scanpy as sc
from ._version import get_versions
def save_df_to_npz(obj, filename):
"""
    Saves a pandas DataFrame to a compressed `.npz` file
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
np.savez_compressed(
filename,
data=obj.values,
index=obj.index.values,
columns=obj.columns.values,
)
def save_df_to_text(obj, filename):
"""
    Saves a pandas DataFrame to a tab-delimited text file
"""
obj.to_csv(filename, sep="\t")
def load_df_from_npz(filename):
"""
    Loads a pandas DataFrame from a `.npz` file created by `save_df_to_npz`
"""
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
with np.load(filename, allow_pickle=True) as f:
obj = pd.DataFrame(**f)
return obj
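# --- Illustrative round trip (added; not part of the original cNMF module) ---
# Sanity-check sketch for the save/load helpers above, using a throwaway DataFrame and
# a temporary directory so no real file paths are assumed.
def _demo_npz_roundtrip():
    import tempfile
    df_demo = pd.DataFrame(np.arange(6).reshape(2, 3),
                           index=["cell_a", "cell_b"],
                           columns=["gene1", "gene2", "gene3"])
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "demo.df.npz")
        save_df_to_npz(df_demo, path)
        loaded = load_df_from_npz(path)
    return loaded.equals(df_demo)  # expected: True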
def check_dir_exists(path):
"""
Checks if directory already exists or not and creates it if it doesn't
"""
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def worker_filter(iterable, worker_index, total_workers):
return (
p for i, p in enumerate(iterable) if (i - worker_index) % total_workers == 0
)
def fast_euclidean(mat):
D = mat.dot(mat.T)
squared_norms = np.diag(D).copy()
D *= -2.0
D += squared_norms.reshape((-1, 1))
D += squared_norms.reshape((1, -1))
    D[D < 0] = 0  # clamp tiny negatives from floating-point error before taking the square root
    D = np.sqrt(D)
return squareform(D, checks=False)
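# --- Illustrative check (added; not part of the original cNMF module) ---
# fast_euclidean returns a condensed distance vector, so it can be compared directly
# against scipy's reference implementation on a small random matrix.
def _demo_fast_euclidean_check():
    from scipy.spatial.distance import pdist
    rng = np.random.RandomState(0)
    mat = rng.rand(20, 5)
    return np.allclose(fast_euclidean(mat), pdist(mat, metric="euclidean"), atol=1e-8)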
def fast_ols_all_cols(X, Y):
pinv = np.linalg.pinv(X)
beta = np.dot(pinv, Y)
return beta
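# --- Illustrative check (added; not part of the original cNMF module) ---
# fast_ols_all_cols solves ordinary least squares for every column of Y at once via the
# pseudo-inverse; for a full-column-rank X it matches np.linalg.lstsq.
def _demo_fast_ols_check():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(50, 3), rng.rand(50, 4)
    beta_ref = np.linalg.lstsq(X, Y, rcond=None)[0]
    return np.allclose(fast_ols_all_cols(X, Y), beta_ref)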
def fast_ols_all_cols_df(X, Y):
beta = fast_ols_all_cols(X, Y)
beta = | pd.DataFrame(beta, index=X.columns, columns=Y.columns) | pandas.DataFrame |
import pandas as pd
import numpy as np
import glob
import sys
import re
from scipy import interpolate
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import z_at_value
import astropy.units as u
from cosmic.evolve import Evolve
from cosmic.sample.initialbinarytable import InitialBinaryTable
#----------------------------------------------------------------------------------
## physical constants
c = 2.99e10 ## speed of light in cm/s
secyr = 3.154e7 ## seconds per year
Myr = 1e6 ## years per Myr
Msun = 1.989e33 ## grams per solar mass
Lsun = 3.839e33 ## erg/sec per solar luminosity
#-----------------------------------------------------------------------------------
## analytic approximation for P(omega) from Dominik et al. 2015
def P_omega(omega_values):
return 0.374222*(1-omega_values)**2 + 2.04216*(1-omega_values)**4 - 2.63948*(1-omega_values)**8 + 1.222543*(1-omega_values)**10
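#----------------------------------------------------------------------------------
## illustrative check (added, not part of the original script): the Dominik et al. 2015
## fit should behave like a survival function of the projection factor omega --
## close to 1 at omega=0, exactly 0 at omega=1, and non-increasing in between
def _demo_P_omega_sanity():
    omega_grid = np.linspace(0.0, 1.0, 101)
    p = P_omega(omega_grid)
    assert abs(p[0] - 1.0) < 1e-2 and p[-1] == 0.0
    assert np.all(np.diff(p) <= 0.0)
    return p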
#----------------------------------------------------------------------------------
### Monte Carlo sampling for detections above the given SNR threshold
def calc_detection_prob(m1, m2, z_merge):
## constants that reflect LIGO design sensitivity
d_L8 = 1 ## in Gpc
M_8 = 10 ## in Msun \
SNR_thresh = 8
## approximate typical SNR from Fishbach et al. 2018
M_chirp = (m1*m2)**(3./5)/(m1+m2)**(1./5)
d_C = cosmo.comoving_distance(z_merge).to(u.Gpc).value
d_L = (1+z_merge)*d_C
rho_0 = 8*(M_chirp*(1+z_merge)/M_8)**(5./6)*d_L8/d_L ## this is the "typical/optimal" SNR
if (rho_0 < SNR_thresh): return 0
## sample omega according to distribution for omega via inverse CDF method
dist_size = 10000
sample_size = 1000
P_omega_dist = P_omega(np.linspace(0, 1, dist_size))
inv_P_omega = interpolate.interp1d(P_omega_dist, np.linspace(0, 1, dist_size), fill_value="extrapolate")
omega = inv_P_omega(np.random.uniform(0, 1, sample_size))
## find the true SNRs given sky location
rho = omega*rho_0
accept_SNR_num = len(rho[np.where(rho >= SNR_thresh)])
p_det = accept_SNR_num/sample_size
return p_det
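#----------------------------------------------------------------------------------
## illustrative usage (added, not part of the original script): the component masses and
## merger redshift below are made-up example values, not taken from the sampled population
def _demo_detection_prob():
    m1_example, m2_example = 30.0, 25.0  ## example source-frame masses in Msun
    z_example = 0.2
    return calc_detection_prob(m1_example, m2_example, z_example)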
#-----------------------------------------------------------------------------------#
def calc_flux(current_BH_mass, initial_BH_mass, mdot_BH, d_L):
bolometric_correction = 0.8
where_lower_masses = current_BH_mass < np.sqrt(6)*initial_BH_mass
eta_lower_masses = 1 - np.sqrt(1-(current_BH_mass/(3*initial_BH_mass))**2)
eta = np.where(where_lower_masses, eta_lower_masses, 0.42)
acc_rate = mdot_BH/(1-eta) ## accretion rate in Msun/year
luminosity = bolometric_correction*eta*acc_rate*c**2*Msun/secyr ## accretion luminosity in erg/sec
flux = luminosity/(4 * np.pi * d_L**2) ## flux in erg/s/cm^2
return flux
#----------------------------------------------------------------------------------
columns = ['bin_num', 'metallicity', 'merger_type', 'bin_state', 'delay_time', 'lookback_time',
           'z_f', 'p_det', 'p_cosmic', 'merge_by_z0', 'ZAMS_mass_k1', 'ZAMS_mass_k2',
           'remnant_mass_k1', 'remnant_mass_k2', 'final_k1', 'final_k2', 'BH_mass_i',
           'donor_mass_i', 'donor_type', 'XRB_sep_i', 'XRB_porb_i', 'emit11', 'emit13',
           'emit15', 'emit_tot', 'this_BBH', 'this_BBHm', 'this_HMXB']
df_all = pd.DataFrame(columns=columns)
sampled_pop = pd.read_csv(sys.argv[1]) ## file of sampled population; file is structured like bpp array, csv format
sampled_initC = | pd.read_csv(sys.argv[2]) | pandas.read_csv |
from scrapers import scraper_modules as sm
import pandas as pd
from bs4 import BeautifulSoup
link = 'https://listsofjohn.com/PeakStats/select.php?R=P&sort=&P=0&S=WA'
def get_a_tags(html: str):
html_soup = BeautifulSoup(html, 'html.parser')
table_rows = html_soup.find_all('tr')
a_tags = []
for tr in table_rows:
a_tag = tr.find('a')
a_tags.append(a_tag)
return a_tags
def create_csv():
html = sm.download_html(link)
list_of_a_tags = get_a_tags(html)
peaks_dict = sm.create_peaks_dict(list_of_a_tags, 'a', title_retrieval_method=None)
df = | pd.DataFrame(peaks_dict) | pandas.DataFrame |
#!/usr/bin/python
# axis=0 applies a function down each column (the default)
# axis=1 applies a function across each row
from scipy.interpolate import spline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import numpy as np
import seaborn as sns
import datetime
import calendar
import json
MOOD_INFO_FILE = "moods_info.json"
MOOD_DATA_FILE = "mood.csv"
# lists of column titles that belong to each year in (2020, 2021)
MONTHS_2020 = ["1-20","2-20","3-20","4-20","5-20","6-20","7-20","8-20","9-20","10-20","11-20","12-20"]
MONTHS_2021 = ["1-21","2-21","3-21","4-21","5-21","6-21","7-21","8-21","9-21","10-21","11-21","12-21"]
BASE_DATE = datetime.date(2020, 1, 1)
# maps characters from csv to moods
char_mood_mapping = {
"a": "happy",
"b": "relaxed",
"c": "neutral",
"d": "anxious",
"e": "sad",
"f": "upset"
}
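# illustrative example (added): mapping raw csv characters to mood names with pandas;
# "x" marks a day with no entry and is left unmapped (NaN) by .map()
def demo_char_mapping():
    return pd.Series(list("aabx")).map(char_mood_mapping).tolist()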
def read_input():
'''Read in, parse, and print input data.'''
with open(MOOD_INFO_FILE) as file:
mood_info = json.load(file)
df = pd.read_csv(MOOD_DATA_FILE)
print("raw data...\n")
counts(df)
return mood_info, df
def counts(df):
'''Collapse original df into monthly counts to summarize data.'''
df_monthly_counts = df.apply(pd.value_counts)
df_monthly_counts.drop(["x"], inplace=True)
df_monthly_counts.rename(index=char_mood_mapping, inplace=True)
print(df_monthly_counts)
def get_days_and_percentages(df, mood_info):
'''Calculate day counts and percentages by mood.'''
total_days = df.shape[0] * df.shape[1] - (df.values == "x").sum()
days = []
percents = []
for m in mood_info.keys():
num_days = (df.values == mood_info[m]["char"]).sum()
days.append(num_days)
percent = int(round(num_days/total_days, 2) * 100)
percents.append(percent)
return days, percents
def total(df, mood_info):
days, percents = get_days_and_percentages(df, mood_info)
percents_str = [str(p) + "%" for p in percents]
df_total = pd.DataFrame({'days':days,'%':percents_str}, index=list(mood_info.keys()))
print("in total...\n\n", df_total.to_string(), end="\n\n")
def annual(df, mood_info):
'''Plot bar chart of annual mood percentages.'''
# 2020
days2020, percents2020 = get_days_and_percentages(df[MONTHS_2020], mood_info)
ser2020 = pd.Series(percents2020, index=list(mood_info.keys()))
# 2021
days2021, percents2021 = get_days_and_percentages(df[MONTHS_2021], mood_info)
ser2021 = pd.Series(percents2021, index=list(mood_info.keys()))
# plot double bar: https://stackoverflow.com/questions/53228762/matplotlib-double-bar-graph-with-pandas-series
annual_df = | pd.DataFrame({"2020":ser2020, "2021":ser2021}) | pandas.DataFrame |
import numpy as np
import seaborn as sns
import pandas as pd
import math
import matplotlib.pyplot as plt
plt.rcParams.update({
"text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"],
"text.latex.preamble": [r'\usepackage{amsfonts}'],
'font.size': 15,
"xtick.labelsize": 15, # large tick labels
"ytick.labelsize": 15, # large tick labels
'figure.figsize': [9, 6]} # default: 6.4 and 4.8
)
def barplot_err(x, y, yerr=None, legend_loc=0, data=None, ax=None, **kwargs):
_data = []
for _i in data.index:
_row = data.loc[_i]
if _row[yerr] is not None:
_data_i = pd.concat([data.loc[_i:_i]] * 3, ignore_index=True, sort=False)
_data_i[y] = [_row[y] - _row[yerr], _row[y], _row[y] + _row[yerr]]
else:
_data_i = pd.concat([data.loc[_i:_i]], ignore_index=True, sort=False)
_data_i[y] = _row[y]
_data.append(_data_i)
_data = pd.concat(_data, ignore_index=True, sort=False)
_ax = sns.barplot(x=x, y=y, data=_data, ci='sd', ax=ax, **kwargs)
_ax.legend(loc=legend_loc, fontsize=12)
# _ax.set_yscale("log")
return _ax
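# Illustrative usage (added; not part of the original plotting script). The numbers in
# demo_df are made up; they only show the columns barplot_err expects: a y value, a
# symmetric error used to build the +/- rows, and a hue column for the legend.
def demo_barplot_err():
    demo_df = pd.DataFrame({"Function": ["f1", "f2"],
                            "Estimate": [1.0, 2.0],
                            "Error": [0.1, 0.3],
                            "Estimator": ["demo", "demo"]})
    return barplot_err(x="Function", y="Estimate", yerr="Error", hue="Estimator",
                       capsize=.2, data=demo_df)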
def plotLossFunction(results_folder_path, save_pdf=False):
plt.figure("Loss function")
filename2 = results_folder_path + "training_history.csv"
training_history = np.loadtxt(filename2, delimiter=",", skiprows=1)
steps = training_history[:, 0].astype(int)
loss_trj = training_history[:, 1]
cpu_time = training_history[-1, 2]
print("Training time %d seconds" % cpu_time)
# plot the loss function
plt.plot(steps, loss_trj, color='g', linewidth=2)
plt.xlabel('Steps')
plt.ylabel('Loss')
if save_pdf:
plt.savefig(results_folder_path + "loss_function.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
#
# def plotFunctionTrajectory(results_folder_path, func_names):
# plt.figure("Function Trajectory")
# filename = results_folder_path + "function_value_data.csv"
# filename2 = results_folder_path + "training_history.csv"
# training_history = np.loadtxt(filename2, delimiter=",", skiprows=1)
# steps = training_history[:, 0].astype(int)
# loss_trj = training_history[:, 1]
# cpu_time = training_history[-1, 2]
# del training_history
# print("Training time %d seconds" % cpu_time)
#
# function_value_data = pd.read_csv(filename, delimiter=",",
# names=func_names)
#
# function_value_data.insert(loc=0, column="Steps", value=steps)
# function_value_data.set_index("Steps")
# sns.set(rc={'figure.figsize': (10, 7)})
# for f in func_names:
# ax = sns.lineplot(x="Steps", y=f, data=function_value_data)
# ax.set(ylabel="function values")
# plt.legend(labels=func_names)
def plot_validation_charts_function(results_folder_path, func_names, exact_values, save_pdf=False):
filename = results_folder_path + "function_value_data.csv"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names)
dnn_func_values = function_value_data.tail(1).to_numpy()[0, :]
del function_value_data
# we first compare the mean function estimates
filename = results_folder_path + "SimulationValidation.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
ssa_func_values_mean = function_value_data.values[0, :]
ssa_func_values_std = function_value_data.values[1, :]
dict1 = {"Function": func_names, "Estimate": ssa_func_values_mean,
"Error": 1.96 * ssa_func_values_std, "Estimator": "SSA"}
dict2 = {"Function": func_names, "Estimate": dnn_func_values,
"Error": None, "Estimator": "DeepCME"}
df1 = pd.DataFrame(dict1)
df2 = pd.DataFrame(dict2)
if exact_values is not None:
dict3 = {"Function": func_names, "Estimate": np.array(exact_values),
"Error": None, "Estimator": "Exact"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
else:
filename = results_folder_path + "SimulationValidation_exact.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean2 = function_value_data.values[0, :]
sim_est_values_std2 = function_value_data.values[1, :]
dict3 = {"Function": func_names, "Estimate": sim_est_func_values_mean2,
"Error": 1.96 * sim_est_values_std2, "Estimator": "mNRM ($10^4$ samples)"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
df.set_index(np.arange(0, 3 * len(func_names)), inplace=True)
plt.figure("Estimated function values")
barplot_err(x="Function", y="Estimate", legend_loc=3, yerr="Error", hue="Estimator",
capsize=.2, data=df)
if save_pdf:
plt.savefig(results_folder_path + "func_estimates.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plot_validation_charts_function_separate(results_folder_path, func_names, exact_values, save_pdf=False):
filename = results_folder_path + "function_value_data.csv"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names)
dnn_func_values = function_value_data.tail(1).to_numpy()[0, :]
print(dnn_func_values)
del function_value_data
# we first compare the mean function estimates
filename = results_folder_path + "SimulationValidation.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean = function_value_data.values[0, :]
sim_est_values_std = function_value_data.values[1, :]
dict1 = {"Function": func_names, "Estimate": sim_est_func_values_mean,
"Error": 1.96 * sim_est_values_std, "Estimator": "mNRM ($10^3$ samples)"}
dict2 = {"Function": func_names, "Estimate": dnn_func_values,
"Error": None, "Estimator": "DeepCME"}
df1 = pd.DataFrame(dict1)
df2 = pd.DataFrame(dict2)
if exact_values is not None:
dict3 = {"Function": func_names, "Estimate": np.array(exact_values),
"Error": None, "Estimator": "Exact"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
else:
filename = results_folder_path + "SimulationValidation_exact.txt"
function_value_data = pd.read_csv(filename, delimiter=",",
names=func_names, skiprows=3)
sim_est_func_values_mean2 = function_value_data.values[0, :]
sim_est_values_std2 = function_value_data.values[1, :]
dict3 = {"Function": func_names, "Estimate": sim_est_func_values_mean2,
"Error": 1.96 * sim_est_values_std2, "Estimator": "mNRM ($10^4$ samples)"}
df3 = pd.DataFrame(dict3)
df = pd.concat([df2, df1, df3])
df.set_index(np.arange(0, 3 * len(func_names)), inplace=True)
fig, axs = plt.subplots(1, len(func_names), num="Estimated function values")
for i in range(len(func_names)):
barplot_err(x="Function", y="Estimate", legend_loc=3, yerr="Error", hue="Estimator",
capsize=.2, data=df.loc[df["Function"] == func_names[i]], ax=axs[i])
if i == 0:
axs[i].set_ylabel("Function Estimate")
else:
axs[i].set_ylabel("")
axs[i].set_title(func_names[i])
axs[i].set_xticklabels("")
axs[i].set_xlabel("")
if save_pdf:
plt.savefig(results_folder_path + "func_estimates.pdf", bbox_inches='tight', transparent="False", pad_inches=0)
def plot_validation_charts_sensitivity(results_folder_path, func_names, parameter_list, parameter_labels,
exact_sens_estimates,
save_pdf=False):
filename1 = results_folder_path + "BPA_Sens_Values.txt"
filename2 = results_folder_path + "DNN_Sens_Values.txt"
filename3 = results_folder_path + "BPA_Sens_Values_exact.txt"
if exact_sens_estimates is not None:
sens_exact2 = pd.DataFrame(exact_sens_estimates, columns=func_names)
func_names.insert(0, "Parameter")
else:
func_names.insert(0, "Parameter")
sens_exact2 = pd.read_csv(filename3, delimiter=",", names=func_names, skiprows=3)
sens_bpa2 = pd.read_csv(filename1, delimiter=",", names=func_names, skiprows=3)
sens_dnn2 = pd.read_csv(filename2, delimiter=",", names=func_names, skiprows=1)
if exact_sens_estimates is not None:
sens_exact2.insert(0, "Parameter", sens_dnn2["Parameter"])
if parameter_list:
_extended = []
for param in parameter_list:
_extended.append(param + "(std.)")
for param in _extended:
parameter_list.append(param)
sens_bpa = sens_bpa2[sens_bpa2["Parameter"].isin(parameter_list)]
sens_dnn = sens_dnn2[sens_dnn2["Parameter"].isin(parameter_list)]
sens_exact = sens_exact2[sens_exact2["Parameter"].isin(parameter_list)]
else:
sens_bpa = sens_bpa2
sens_dnn = sens_dnn2
sens_exact = sens_exact2
addl_col1 = ["BPA ($10^3$ samples)" for _ in range(len(sens_bpa.index))]
addl_col2 = ["DeepCME" for _ in range(len(sens_dnn.index))]
if exact_sens_estimates is not None:
addl_col3 = ["Exact" for _ in range(len(sens_exact.index))]
else:
addl_col3 = ["BPA ($10^4$ samples)" for _ in range(len(sens_exact.index))]
sens_bpa.insert(3, "Estimator", addl_col1)
sens_dnn.insert(3, "Estimator", addl_col2)
sens_exact.insert(3, "Estimator", addl_col3)
sens_bpa_mean = sens_bpa.head(int(sens_bpa.shape[0] / 2))
sens_bpa_mean_std = sens_bpa.tail(int(sens_bpa.shape[0] / 2))
if exact_sens_estimates is None:
sens_exact_mean = sens_exact.head(int(sens_exact.shape[0] / 2))
sens_exact_mean_std = sens_exact.tail(int(sens_exact.shape[0] / 2))
fig, axs = plt.subplots(1, len(func_names) - 1, num='Estimated Parameter Sensitivities for output functions')
for i in range(len(func_names) - 1):
# sns.barplot(data=df, x="Parameter", y=func_names[i + 1], hue="Estimator")
f1 = sens_bpa_mean[["Parameter", func_names[i + 1], "Estimator"]]
stds = sens_bpa_mean_std[func_names[i + 1]].to_numpy()
f1.insert(2, "Error", 1.96 * stds)
f2 = sens_dnn[["Parameter", func_names[i + 1], "Estimator"]]
f2.insert(2, "Error", None)
if exact_sens_estimates is not None:
f3 = sens_exact[["Parameter", func_names[i + 1], "Estimator"]]
f3.insert(2, "Error", None)
df = pd.concat([f2, f1, f3])
else:
f3 = sens_exact_mean[["Parameter", func_names[i + 1], "Estimator"]]
stds = sens_exact_mean_std[func_names[i + 1]].to_numpy()
f3.insert(2, "Error", 1.96 * stds)
df = | pd.concat([f2, f1, f3]) | pandas.concat |
"""
MIT License
Copyright (c) 2021, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------------------------------------------------
Experiment using SVM classifier
=====================
This class implements our experiment using a Support Vector Machine (SVM) classifier. If a
different classifier is required, refer to the comments in the classifier section for
instructions; only a few changes are needed to update the experiment.
Note: this experiment needs data from the hypervolume calculation, which can be produced with the R script (hyper_vol_usage.R).
This experiment can use the optional sklearnex package to speed up scikit-learn on Intel processors.
"""
import collections
import sys
from operator import itemgetter
from sklearn.cluster import KMeans
from source_code.adversaries.kpp_attack import KppAttack
from source_code.adversaries.mk_attack import MkAttack
from source_code.adversaries.stat_attack import StatAttack
from source_code.adversaries.hyp_attack import HypVolAttack
from source_code.dataset.biometric_dataset import BioDataSet
import numpy as np
import pandas as pd
import os
import seaborn as sns
from joblib import dump, load
import matplotlib.pyplot as plt
from source_code.metrics.confusion_matrix import ConfusionMatrix
from source_code.metrics.fcs import FCS
from source_code.metrics.roc_curve import RocCurve
from source_code.synth_data_gen.gauss_blob_generator import GaussBlob
from source_code.analytics.dataoverlap_interval import OverLapInt
import traceback
class HypExp:
def __init__(self, pop_df, attack_df, pop_classifier_path, pos_user_per_dim_ol_path, active_gr,
results_save_path=None,
attack_samples=1000, boot_strap_st_at=False, bs_data_path=None, bs_mul=1,
hv_cut_off=0.04, gr2_per_dim_ol_path=None, std_dev_at_gr=5, clf_type=None,
hyp_at_u_data=None, rand_state=None, train_data_size=0.6,
train_classifiers=False, cluster_data_path=None, hyp_vol_data_path=None,
num_cls=None, cv=10, random_grid_search_iter=25):
self.pop_df = pop_df.copy()
self.attack_df = attack_df.copy()
self.active_gr = active_gr
self.classifier_training = train_classifiers
self.results_save_path = results_save_path
self.clf_type = clf_type
self.rand_state = rand_state
self.attack_samples = attack_samples
self.boot_strap_st_at = boot_strap_st_at
self.train_data_size = train_data_size
self.num_cls = num_cls
self.cv = cv
self.n_iter = random_grid_search_iter
self.bs_mul = bs_mul
self.hv_cut_off = hv_cut_off
self.feat_list = None
self.clf_path = pop_classifier_path
self.pos_user_per_dim_ol_path = pos_user_per_dim_ol_path
self.bs_data_path = bs_data_path
self.gr2_per_dim_ol_path = gr2_per_dim_ol_path
self.std_dev_at_gr = std_dev_at_gr
self.cluster_data_path = cluster_data_path
self.hyp_vol_data_path = hyp_vol_data_path
# creating dictionaries for data gathering
self.test_prd_dict = dict()
self.test_prd_prob_dict = dict()
self.test_cm_dict = dict()
self.test_precision = dict()
self.roc_dict = dict()
self.fcs_dict = dict()
self.fcs_plt = dict()
self.att_prd_mk = dict()
self.att_prd_prob_mk = dict()
self.att_prd_kpp = dict()
self.att_prd_prob_kpp = dict()
self.att_prd_stat = dict()
self.att_prd_prob_stat = dict()
self.att_prd_hyp = dict()
self.att_prd_prob_hyp = dict()
# Attack Data
self.attack_df_kpp = None
self.attack_df_mk = None
self.attack_df_stat = None
if hyp_at_u_data is not None:
self.attack_df_hyp = hyp_at_u_data
else:
self.attack_df_hyp = None
# Result Data
self.acc_res_full_df = None
self.acc_res_df = None
self.acc_per_df = None
self.acc_eer_df = None
self.stack_res_df = None
return
def run_exp(self):
data_group_1 = dict()
clf_dict = dict()
gr2_means = self.attack_df.mean()
gr2_means_fv = gr2_means.drop('user', axis=0).to_numpy().reshape(1, -1)
gr2_std = self.attack_df.std()
gr2_std_fv = gr2_std.drop('user', axis=0).to_numpy().reshape(1, -1)
tb_data_group_1 = BioDataSet(feature_data_frame=self.pop_df, random_state=self.rand_state)
tb_data_group_2 = BioDataSet(feature_data_frame=self.attack_df, random_state=self.rand_state)
# Extracting users in both groups
users_group_1 = tb_data_group_1.user_list
users_group_2 = tb_data_group_2.user_list
self.feat_list = self.pop_df.columns.drop('user').to_list()
"""
Generating user data
"""
user_g1_df_dict = dict()
for user in users_group_1:
data_group_1[user] = tb_data_group_1.get_data_set(user, neg_sample_sources=None, neg_test_limit=True)
user_g1_df_dict[user] = self.pop_df[self.pop_df['user'] == user]
if self.classifier_training is True:
scoring_metric = 'precision'
self.cv = 10 # specify a number for cv fold cross validation
self.n_iter = 25 #number of iterations for random grid search
precision_tup = list()
eer_tup = list()
print(f"training classifiers")
if self.clf_type == 'svm':
# Comment out the two lines below if not running on an Intel processor or if sklearnex is not installed
from sklearnex import patch_sklearn
patch_sklearn()
from classifiers.svm_classifier import SvmClassifier
# Classifier training grid params, update with classifer specic hyper parameters
c_range = np.unique(np.logspace(start=0.1, stop=4, num=100 + 20, dtype=int))
grid_svm = {'estimator__C': c_range,
'estimator__gamma': ['auto', 'scale']}
# Update the classifier on the line below to use a different classifier
clf_dict = {usr: SvmClassifier(pos_user=data_group_1[usr], random_state=self.rand_state)
for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_svm, cv=self.cv, scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
elif self.clf_type == 'knn':
from classifiers.knn_classifier import KnnClassifier
leaf_size = list(range(1, 70))
n_neighbors = list(range(1, 50))
p = [1, 2]
grid_knn = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p)
clf_dict = {usr: KnnClassifier(pos_user=data_group_1[usr], random_state=self.rand_state,
n_jobs=-1) for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_knn, cv=self.cv,
scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
elif self.clf_type == 'rf':
# Comment out the two sklearnex lines below if not running on an Intel processor or if sklearnex is not installed
from classifiers.random_forest_classifier import RandomForestClassifier
from sklearnex import patch_sklearn
patch_sklearn()
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
# Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
grid_rf = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
clf_dict = {usr: RandomForestClassifier(pos_user=data_group_1[usr], random_state=self.rand_state,
n_jobs=-1) for usr in users_group_1}
for usr in users_group_1:
print(f'training for user {usr}')
clf_name_string = f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"
clf_dict[usr].split_data(data_frame=data_group_1[usr], training_data_size=0.6)
clf_dict[usr].random_train_tune_parameters(pram_dist=grid_rf, cv=self.cv,
scoring_metric=scoring_metric,
n_itr=self.n_iter)
dump(clf_dict[usr], os.path.join(self.clf_path, clf_name_string))
else:
print('classifier not implimented')
sys.exit(1)
print(f"training classifiers complete")
else:
"""
Loading classifiers from disk
"""
print(f"Loading classifiers")
try:
clf_dict = {usr: load(os.path.join(self.clf_path, f"clf_{usr}_{self.clf_type}_{self.rand_state}.joblib"))
for usr in users_group_1}
except Exception():
traceback.print_exc()
print(f"Loading classifiers complete")
"""
Calculating mean overlaps on feature level
"""
print(f"Calculating mean overlaps on feature level started")
overlap_other_per_user_means_df = pd.DataFrame()
overlap_by_other_per_user_means_df = pd.DataFrame()
for pos_user in users_group_1:
pos_user_per_dim_ol_path = self.pos_user_per_dim_ol_path
pos_user_per_dim_ol = pd.read_csv(pos_user_per_dim_ol_path)
pos_user_per_dim_ol = pos_user_per_dim_ol.drop('Unnamed: 0', axis=1)
pos_user_pd_ol_others = pos_user_per_dim_ol[(pos_user_per_dim_ol['V2'] == pos_user)]
pos_user_pd_ol_others_mean = pos_user_pd_ol_others.drop(['V1', 'V2'], axis=1).mean()
overlap_other_per_user_means_df[pos_user] = pos_user_pd_ol_others_mean
pos_user_pd_ol_by_others = pos_user_per_dim_ol[(pos_user_per_dim_ol['V1'] == pos_user)]
pos_user_pd_ol_by_others_mean = \
pos_user_pd_ol_by_others.drop(['V1', 'V2'], axis=1).mean().sort_values()
overlap_by_other_per_user_means_df[pos_user] = pos_user_pd_ol_by_others_mean
print(f"Calculating mean overlaps on feature level complete")
"""
Calculating mean statistics for overlaps over entire population
"""
overlap_other_means = overlap_other_per_user_means_df.mean(axis=1)
overlap_other_means = overlap_other_means.sort_values(ascending=True)
overlap_other_range = overlap_other_per_user_means_df.max(axis=1) - overlap_other_per_user_means_df.min(axis=1)
overlap_other_range = overlap_other_range.sort_values(ascending=True)
overlap_by_other_means = overlap_by_other_per_user_means_df.mean(axis=1)
overlap_by_other_means = overlap_by_other_means.sort_values(ascending=True)
overlap_by_other_range = overlap_by_other_per_user_means_df.max(
axis=1) - overlap_by_other_per_user_means_df.min(axis=1)
overlap_by_other_range = overlap_by_other_range.sort_values(ascending=True)
'''
Model Classification
'''
print(f"Starting model classification")
self.test_prd_dict = {usr: clf_dict[usr].classify() for usr in users_group_1}
self.test_prd_prob_dict = {usr: clf_dict[usr].predictions_prob for usr in users_group_1}
print(f"Model classification complete")
"""
Test set and labels extraction
"""
test_set = {usr: clf_dict[usr].test_data_frame.drop('labels', axis=1) for usr in users_group_1}
test_labels = {usr: clf_dict[usr].test_data_frame.labels.values for usr in users_group_1}
"""
Confusion Matrix
"""
self.test_cm_dict = {usr: ConfusionMatrix() for usr in users_group_1}
matrix_svm = {usr: self.test_cm_dict[usr].get_metric(true_labels=test_labels[usr],
predicted_labels=self.test_prd_dict[usr])
for usr in users_group_1}
self.test_precision = {usr: self.test_cm_dict[usr].tp / (self.test_cm_dict[usr].tp + self.test_cm_dict[usr].fp)
for usr in users_group_1}
"""
ROC Curves
"""
self.roc_dict = {usr: RocCurve() for usr in users_group_1}
roc_svm = {usr: self.roc_dict[usr].get_metric(test_set_features=test_set[usr].values,
test_set_labels=test_labels[usr],
classifier=clf_dict[usr].classifier, ax=None)
for usr in users_group_1}
"""
FCS
"""
self.fcs_dict = {usr: FCS(classifier_name='SVM') for usr in users_group_1}
self.fcs_plt = {usr: self.fcs_dict[usr].get_metric(
true_labels=test_labels[usr],
predicted_probs=clf_dict[usr].predictions_prob,
pred_labels=clf_dict[usr].predictions)
for usr in users_group_1}
plt.close('all')
"""
Master Key Attack
"""
# Generating attack set
mk_adv = MkAttack(data=self.attack_df, required_attack_samples=self.attack_samples)
self.attack_df_mk = mk_adv.generate_attack()
# Performing attack
self.att_prd_mk = {usr: clf_dict[usr].classifier.predict(self.attack_df_mk.values)
for usr in users_group_1}
att_prd_prob_mk = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_mk.values)
for usr in users_group_1}
self.att_prd_prob_mk = {usr: att_prd_prob_mk[usr][:, 1]
for usr in users_group_1}
"""
Targeted K-means++ Attack
"""
# Generating attack set, first point is the mean of the attack data
kpp_adv = KppAttack(data=self.attack_df, required_attack_samples=self.attack_samples)
self.attack_df_kpp = kpp_adv.generate_attack()
# Performing attack
self.att_prd_kpp = {usr: clf_dict[usr].classifier.predict(self.attack_df_kpp.values)
for usr in users_group_1}
att_prd_prob_kpp = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_kpp.values)
for usr in users_group_1}
self.att_prd_prob_kpp = {usr: att_prd_prob_kpp[usr][:, 1]
for usr in users_group_1}
"""
Stats Attack
"""
stat_adv = StatAttack(data=self.attack_df, required_attack_samples=self.attack_samples,
bootstrap_data_path=self.bs_data_path,
run_bootstrap=self.boot_strap_st_at, bootstrap_iter=self.bs_mul * 1000)
self.attack_df_stat = stat_adv.generate_attack()
# Performing attack
self.att_prd_stat = {usr: clf_dict[usr].classifier.predict(self.attack_df_stat.values)
for usr in users_group_1}
att_prd_prob_stat = {usr: clf_dict[usr].classifier.predict_proba(self.attack_df_stat.values)
for usr in users_group_1}
self.att_prd_prob_stat = {usr: att_prd_prob_stat[usr][:, 1]
for usr in users_group_1}
"""
Hypervolume Attack
"""
if self.attack_df_hyp is None:
hyp_adv = HypVolAttack(data=self.attack_df, equal_user_data=False, random_state=self.rand_state,
calc_clusters=False,
clusters_path=self.cluster_data_path, gr_num=1, cluster_count=self.num_cls,
ol_path=self.hyp_vol_data_path, attack_samples=self.attack_samples,
ol_cut_off=self.hv_cut_off, std_dev_at_gr=None)
self.attack_df_hyp = hyp_adv.generate_attack()
else:
self.attack_df_hyp = self.attack_df_hyp
# Performing attack
self.att_prd_hyp = {usr: clf_dict[usr].classifier.predict(
self.attack_df_hyp.drop(["cluster_number"], axis=1).values)
for usr in users_group_1}
att_prd_prob_hyp = {usr: clf_dict[usr].classifier.predict_proba(
self.attack_df_hyp.drop(["cluster_number"], axis=1).values)
for usr in users_group_1}
self.att_prd_prob_hyp = {usr: att_prd_prob_hyp[usr][:, 1]
for usr in users_group_1}
df_hyp = pd.DataFrame.from_dict(self.att_prd_hyp)
df_stat = pd.DataFrame.from_dict(self.att_prd_stat)
df_kpp = pd.DataFrame.from_dict(self.att_prd_kpp)
df_mk = pd.DataFrame.from_dict(self.att_prd_mk)
df_prob_hyp = pd.DataFrame.from_dict(self.att_prd_prob_hyp)
df_prob_stat = pd.DataFrame.from_dict(self.att_prd_prob_stat)
df_prob_kpp = | pd.DataFrame.from_dict(self.att_prd_prob_kpp) | pandas.DataFrame.from_dict |
import datetime
import glob
import logging
import time
from unittest.mock import patch
import boto3
import botocore
import pandas as pd
import pytest
import pytz
import awswrangler as wr
API_CALL = botocore.client.BaseClient._make_api_call
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.mark.parametrize("sanitize_columns,col", [(True, "foo_boo"), (False, "FooBoo")])
def test_sanitize_columns(path, sanitize_columns, col):
df = pd.DataFrame({"FooBoo": [1, 2, 3]})
# Parquet
file_path = f"{path}0.parquet"
wr.s3.to_parquet(df, path=file_path, sanitize_columns=sanitize_columns)
df = wr.s3.read_parquet(file_path)
assert len(df.index) == 3
assert len(df.columns) == 1
assert df.columns == [col]
# CSV
file_path = f"{path}0.csv"
wr.s3.to_csv(df, path=file_path, sanitize_columns=sanitize_columns, index=False)
df = wr.s3.read_csv(file_path)
assert len(df.index) == 3
assert len(df.columns) == 1
assert df.columns == [col]
def test_list_by_last_modified_date(path):
df = pd.DataFrame({"id": [1, 2, 3]})
path0 = f"s3://{path}0.json"
path1 = f"s3://{path}1.json"
begin_utc = pytz.utc.localize(datetime.datetime.utcnow())
time.sleep(5)
wr.s3.to_json(df, path0)
time.sleep(5)
mid_utc = pytz.utc.localize(datetime.datetime.utcnow())
time.sleep(5)
wr.s3.to_json(df, path1)
time.sleep(5)
end_utc = pytz.utc.localize(datetime.datetime.utcnow())
assert len(wr.s3.read_json(path).index) == 6
assert len(wr.s3.read_json(path, last_modified_begin=mid_utc).index) == 3
assert len(wr.s3.read_json(path, last_modified_end=mid_utc).index) == 3
with pytest.raises(wr.exceptions.NoFilesFound):
wr.s3.read_json(path, last_modified_begin=end_utc)
with pytest.raises(wr.exceptions.NoFilesFound):
wr.s3.read_json(path, last_modified_end=begin_utc)
assert len(wr.s3.read_json(path, last_modified_begin=mid_utc, last_modified_end=end_utc).index) == 3
assert len(wr.s3.read_json(path, last_modified_begin=begin_utc, last_modified_end=mid_utc).index) == 3
assert len(wr.s3.read_json(path, last_modified_begin=begin_utc, last_modified_end=end_utc).index) == 6
def test_delete_internal_error(bucket):
response = {
"Errors": [
{
"Key": "foo/dt=2020-01-01 00%3A00%3A00/boo.txt",
"Code": "InternalError",
"Message": "We encountered an internal error. Please try again.",
}
]
}
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "DeleteObjects":
return response
return API_CALL(self, operation_name, kwarg)
start = time.time()
with patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call):
path = f"s3://{bucket}/foo/dt=2020-01-01 00:00:00/boo.txt"
with pytest.raises(wr.exceptions.ServiceApiError):
wr.s3.delete_objects(path=[path])
assert 15 <= (time.time() - start) <= 25
def test_delete_error(bucket):
response = {"Errors": [{"Code": "AnyNonInternalError"}]}
def mock_make_api_call(self, operation_name, kwarg):
if operation_name == "DeleteObjects":
return response
return API_CALL(self, operation_name, kwarg)
with patch("botocore.client.BaseClient._make_api_call", new=mock_make_api_call):
path = f"s3://{bucket}/boo/dt=2020-01-01 00:00:00/bar.txt"
with pytest.raises(wr.exceptions.ServiceApiError):
wr.s3.delete_objects(path=[path])
def test_s3_empty_dfs():
df = pd.DataFrame()
with pytest.raises(wr.exceptions.EmptyDataFrame):
wr.s3.to_parquet(df=df, path="")
with pytest.raises(wr.exceptions.EmptyDataFrame):
wr.s3.to_csv(df=df, path="")
def test_absent_object(path):
path_file = f"{path}test_absent_object"
assert wr.s3.does_object_exist(path=path_file) is False
assert len(wr.s3.size_objects(path=path_file)) == 0
@pytest.mark.parametrize("use_threads", [True, False])
def test_merge(path, use_threads):
path1 = f"{path}test_merge/"
df = | pd.DataFrame({"id": [1, 2, 3], "par": [1, 2, 3]}) | pandas.DataFrame |
import logging
import tarfile
from tempfile import TemporaryFile
from typing import List, Dict
import numpy as np
import pandas as pd
from kgx.transformer import Transformer
from kgx.utils import make_path
LIST_DELIMITER = '|'
_column_types = {
'publications': list,
'qualifiers': list,
'category': list,
'synonym': list,
'provided_by': list,
'same_as': list,
'negated': bool,
}
_extension_types = {
'csv': ',',
'tsv': '\t',
'txt': '|'
}
_archive_mode = {
'tar': 'r',
'tar.gz': 'r:gz',
'tar.bz2': 'r:bz2'
}
_archive_format = {
'w': 'tar',
'w:gz': 'tar.gz',
'w:bz2': 'tar.bz2'
}
class PandasTransformer(Transformer):
"""
Transformer that parses a pandas.DataFrame, and loads nodes and edges into a networkx.MultiDiGraph
"""
# TODO: Support parsing and export of neo4j-import tool compatible CSVs with appropriate headers
def parse(self, filename: str, input_format: str = 'csv', **kwargs) -> None:
"""
Parse a CSV/TSV (or plain text) file.
The file can represent either nodes (nodes.csv) or edges (edges.csv) or both (data.tar),
where the tar archive contains nodes.csv and edges.csv
The file can also be data.tar.gz or data.tar.bz2
Parameters
----------
filename: str
File to read from
input_format: str
The input file format ('csv', by default)
kwargs: Dict
Any additional arguments
"""
if 'delimiter' not in kwargs:
# infer delimiter from file format
kwargs['delimiter'] = _extension_types[input_format]
if filename.endswith('.tar'):
mode = _archive_mode['tar']
elif filename.endswith('.tar.gz'):
mode = _archive_mode['tar.gz']
elif filename.endswith('.tar.bz2'):
mode = _archive_mode['tar.bz2']
else:
# file is not an archive
mode = None
if mode:
with tarfile.open(filename, mode=mode) as tar:
for member in tar.getmembers():
f = tar.extractfile(member)
df = pd.read_csv(f, comment='#', **kwargs) # type: pd.DataFrame
if member.name == "nodes.{}".format(input_format):
self.load_nodes(df)
elif member.name == "edges.{}".format(input_format):
self.load_edges(df)
else:
raise Exception('Tar archive contains an unrecognized file: {}'.format(member.name))
else:
df = pd.read_csv(filename, comment='#', dtype=str, **kwargs) # type: pd.DataFrame
self.load(df)
def load(self, df: pd.DataFrame) -> None:
"""
Load a panda.DataFrame, containing either nodes or edges, into a networkx.MultiDiGraph
Parameters
----------
df : pandas.DataFrame
Dataframe containing records that represent nodes or edges
"""
if 'subject' in df:
self.load_edges(df)
else:
self.load_nodes(df)
def load_nodes(self, df: pd.DataFrame) -> None:
"""
Load nodes from pandas.DataFrame into a networkx.MultiDiGraph
Parameters
----------
df : pandas.DataFrame
Dataframe containing records that represent nodes
"""
for obj in df.to_dict('record'):
self.load_node(obj)
def load_node(self, node: Dict) -> None:
"""
Load a node into a networkx.MultiDiGraph
Parameters
----------
node : dict
A node
"""
node = Transformer.validate_node(node)
kwargs = PandasTransformer._build_kwargs(node.copy())
if 'id' in kwargs:
n = kwargs['id']
self.graph.add_node(n, **kwargs)
else:
logging.info("Ignoring node with no 'id': {}".format(node))
def load_edges(self, df: pd.DataFrame) -> None:
"""
Load edges from pandas.DataFrame into a networkx.MultiDiGraph
Parameters
----------
df : pandas.DataFrame
Dataframe containing records that represent edges
"""
for obj in df.to_dict('record'):
self.load_edge(obj)
def load_edge(self, edge: Dict) -> None:
"""
Load an edge into a networkx.MultiDiGraph
Parameters
----------
edge : dict
An edge
"""
edge = Transformer.validate_edge(edge)
kwargs = PandasTransformer._build_kwargs(edge.copy())
if 'subject' in kwargs and 'object' in kwargs:
s = kwargs['subject']
o = kwargs['object']
self.graph.add_edge(s, o, **kwargs)
else:
logging.info("Ignoring edge with either a missing 'subject' or 'object': {}".format(kwargs))
def export_nodes(self) -> pd.DataFrame:
"""
Export nodes from networkx.MultiDiGraph as a pandas.DataFrame
Returns
-------
pandas.DataFrame
A Dataframe where each record corresponds to a node from the networkx.MultiDiGraph
"""
rows = []
for n, data in self.graph.nodes(data=True):
data = self.validate_node(data)
row = PandasTransformer._build_export_row(data.copy())
row['id'] = n
rows.append(row)
df = | pd.DataFrame.from_records(rows) | pandas.DataFrame.from_records |
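# --- Illustrative usage sketch (added; not part of the original module) ---
# Assumes the inherited Transformer constructor takes no required arguments and that
# `nodes_file` points at a hypothetical KGX-style node table (an 'id' column and no
# 'subject' column, so PandasTransformer.load() routes it to load_nodes).
def _demo_pandas_transformer(nodes_file: str = "nodes.csv"):
    t = PandasTransformer()
    t.parse(nodes_file, input_format="csv")
    return t.graph.number_of_nodes(), t.graph.number_of_edges()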
"""
.. _l-b-numpy-numba-ort:
Compares numba, numpy, onnxruntime for simple functions
=======================================================
The following benchmark is inspired from `bench_arrayexprs.py
<https://github.com/numba/numba-benchmark/blob/master/benchmarks/bench_arrayexprs.py>`_.
It compares :epkg:`numba`, :epkg:`numpy` and :epkg:`onnxruntime`
for simple functions. As expected, :epkg:`numba` is better than the other options.
.. contents::
:local:
The functions
+++++++++++++
"""
import numpy
import pandas
import matplotlib.pyplot as plt
from numba import jit
from typing import Any
import numpy as np
from tqdm import tqdm
from cpyquickhelper.numbers.speed_measure import measure_time
from mlprodict.npy import NDArray, onnxnumpy_np
from mlprodict.npy.onnx_numpy_annotation import NDArrayType
import mlprodict.npy.numpy_onnx_impl as npnx
# @jit(nopython=True)
def sum(a, b):
return a + b
# @jit(nopython=True)
def sq_diff(a, b):
return (a - b) * (a + b)
# @jit(nopython=True)
def rel_diff(a, b):
return (a - b) / (a + b)
# @jit(nopython=True)
def square(a, b):
# Note this is currently slower than `a ** 2 + b`, due to how LLVM
# seems to lower the power intrinsic. It's still faster than the naive
# lowering as `exp(2 * log(a))`, though
return a ** 2
def cube(a, b):
return a ** 3
#########################################
# ONNX version
# ++++++++++++
#
# The implementation uses the numpy API for ONNX to keep the same code.
@onnxnumpy_np(signature=NDArrayType(("T:all", "T"), dtypes_out=('T',)),
runtime="onnxruntime")
def onnx_sum_32(a, b):
return a + b
@onnxnumpy_np(signature=NDArrayType(("T:all", "T"), dtypes_out=('T',)),
runtime="onnxruntime")
def onnx_sq_diff_32(a, b):
return (a - b) * (a + b)
@onnxnumpy_np(signature=NDArrayType(("T:all", "T"), dtypes_out=('T',)),
runtime="onnxruntime")
def onnx_rel_diff_32(a, b):
return (a - b) / (a + b)
@onnxnumpy_np(signature=NDArrayType(("T:all", "T"), dtypes_out=('T',)),
runtime="onnxruntime")
def onnx_square_32(a, b):
return a ** 2
@onnxnumpy_np(signature=NDArrayType(("T:all", "T"), dtypes_out=('T',)),
runtime="onnxruntime")
def onnx_cube_32(a, b):
return a ** 3
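# A hedged usage note (not part of the original benchmark): each decorated function builds an
# ONNX graph on first call and then runs it through onnxruntime, so it is called exactly like
# its numpy counterpart, e.g.
#   a = numpy.random.rand(10).astype(numpy.float32)
#   b = numpy.random.rand(10).astype(numpy.float32)
#   onnx_sum_32(a, b)   # same values as sum(a, b), computed by onnxruntime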
################################################
# numba optimized
# +++++++++++++++
jitter = jit(nopython=True)
nu_sum = jitter(sum)
nu_sq_diff = jitter(sq_diff)
nu_rel_diff = jitter(rel_diff)
nu_square = jitter(square)
nu_cube = jitter(cube)
#######################################
# Benchmark
# +++++++++
obs = []
for n in tqdm([10, 100, 1000, 10000, 100000, 1000000]):
number = 100 if n < 1000000 else 10
for dtype in [numpy.float32, numpy.float64]:
sample = [numpy.random.uniform(1.0, 2.0, size=n).astype(dtype)
for i in range(2)]
for fct1, fct2, fct3 in [
(sum, nu_sum, onnx_sum_32),
(sq_diff, nu_sq_diff, onnx_sq_diff_32),
(rel_diff, nu_rel_diff, onnx_rel_diff_32),
(square, nu_square, onnx_square_32),
(cube, nu_cube, onnx_cube_32)]:
fct1(*sample)
fct1(*sample)
r = measure_time('fct1(a,b)', number=number, div_by_number=True,
context={'fct1': fct1, 'a': sample[0], 'b': sample[1]})
r.update(dict(dtype=dtype, name='numpy', n=n, fct=fct1.__name__))
obs.append(r)
fct2(*sample)
fct2(*sample)
r = measure_time('fct2(a,b)', number=number, div_by_number=True,
context={'fct2': fct2, 'a': sample[0], 'b': sample[1]})
r.update(dict(dtype=dtype, name='numba', n=n, fct=fct1.__name__))
obs.append(r)
fct3(*sample)
fct3(*sample)
r = measure_time('fct3(a,b)', number=number, div_by_number=True,
context={'fct3': fct3, 'a': sample[0], 'b': sample[1]})
r.update(dict(dtype=dtype, name='onnx', n=n, fct=fct1.__name__))
obs.append(r)
df = | pandas.DataFrame(obs) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype
from pandas.types.common import pandas_dtype, is_dtype_equal
import pandas.util.testing as tm
class TestPandasDtype(tm.TestCase):
def test_numpy_dtype(self):
for dtype in ['M8[ns]', 'm8[ns]', 'object', 'float64', 'int64']:
self.assertEqual(pandas_dtype(dtype), np.dtype(dtype))
def test_numpy_string_dtype(self):
# do not parse freq-like string as period dtype
self.assertEqual(pandas_dtype('U'), np.dtype('U'))
self.assertEqual(pandas_dtype('S'), np.dtype('S'))
def test_datetimetz_dtype(self):
for dtype in ['datetime64[ns, US/Eastern]',
'datetime64[ns, Asia/Tokyo]',
'datetime64[ns, UTC]']:
self.assertIs(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), DatetimeTZDtype(dtype))
self.assertEqual(pandas_dtype(dtype), dtype)
def test_categorical_dtype(self):
self.assertEqual(pandas_dtype('category'), CategoricalDtype())
def test_period_dtype(self):
for dtype in ['period[D]', 'period[3M]', 'period[U]',
'Period[D]', 'Period[3M]', 'Period[U]']:
self.assertIs(pandas_dtype(dtype), PeriodDtype(dtype))
self.assertEqual(pandas_dtype(dtype), PeriodDtype(dtype))
self.assertEqual(pandas_dtype(dtype), dtype)
dtypes = dict(datetime_tz=pandas_dtype('datetime64[ns, US/Eastern]'),
datetime= | pandas_dtype('datetime64[ns]') | pandas.types.common.pandas_dtype |
##################
##Script for the AI Financial Forecaster's Main Code and Trading Strategy Component from Team 1(CE903)
##################
#!pip install nasdaq-data-link
import Core_Infrastructure_1stHalf ##Also forms the Project Directory
from Core_Infrastructure_1stHalf import historical_data_recorder
from Core_Infrastructure_1stHalf import stock_market_dataset_preprocessor
import Core_Infrastructure_2ndHalf
from Core_Infrastructure_2ndHalf import getROC
from Core_Infrastructure_2ndHalf import willR
from Core_Infrastructure_2ndHalf import midPrice
from Core_Infrastructure_2ndHalf import TAA_Dataset_Transformer
from Core_Infrastructure_2ndHalf import TAI_Dataset_Preprocessor
import Trading_Strategy_1stHalf
from Trading_Strategy_1stHalf import MLP_model
import Trading_Strategy_2ndHalf
from Trading_Strategy_2ndHalf import modelLSTM
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
'''
###############################################
###############################################
MAIN CODE
'''
if __name__ == '__main__':
while 1:
'''
Implementation of Stock Market Data Storage
'''
dataset_source = input("Choose source of Stock Market Dataset (Device Storage/ Nasdaq API/ Yahoo Finance): ")##Needs Error Handling due to User Input
if dataset_source == "Device Storage":
company_ticker = input("\nProvide the Company Ticker (Use Project Directory if needed): ")
Stock_Market_Dataset = | pd.read_csv("C:\Group Project\Stock Market Datasets\ %s.csv" % company_ticker) | pandas.read_csv |
# encoding: utf-8
"""
Classes defined in backtest module are responsible to run backtests.
They follow a fix procedure, from loading data to looping through
data and finally save backtest results.
"""
from __future__ import print_function, unicode_literals
import six
import abc
from collections import defaultdict
import numpy as np
import pandas as pd
import datetime as dt
from jaqs.trade import common
from jaqs.data.basic import Bar
from jaqs.data.basic import Trade
import jaqs.util as jutil
from functools import reduce
def generate_cash_trade_ind(symbol, amount, date, time=200000):
trade_ind = Trade()
trade_ind.symbol = symbol
trade_ind.task_id = 0
trade_ind.entrust_no = "0"
trade_ind.set_fill_info(price=0.0, size=abs(amount), date=date, time=time, no="0", trade_date=date)
trade_ind2 = Trade()
trade_ind2.symbol = symbol
trade_ind2.task_id = 0
trade_ind2.entrust_no = "0"
trade_ind2.set_fill_info(price=1.0, size=abs(amount), date=date, time=time, no="0",trade_date=date)
if amount > 0:
trade_ind.entrust_action = common.ORDER_ACTION.BUY
trade_ind2.entrust_action = common.ORDER_ACTION.SELL
else:
trade_ind.entrust_action = common.ORDER_ACTION.SELL
trade_ind2.entrust_action = common.ORDER_ACTION.BUY
return trade_ind, trade_ind2
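# Hedged reading of the helper above (symbol and numbers are illustrative): a positive amount is
# recorded as a zero-price BUY plus a unit-price SELL of the same size, which appears to leave the
# position unchanged while booking the cash movement, e.g.
#   buy_ind, sell_ind = generate_cash_trade_ind('CASH', 1000000, 20170103)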
class BacktestInstance(six.with_metaclass(abc.ABCMeta)):
"""
BacktestInstance is an abstract base class. It can be derived to implement
various backtest tasks.
Attributes
----------
start_date : int
%Y%m%d, start date of the backtest.
end_date : int
%Y%m%d, end date of the backtest.
ctx : Context
Running context of the backtest.
props : dict
props store configurations (settings) of the backtest. Eg: start_date.
"""
def __init__(self):
super(BacktestInstance, self).__init__()
self.start_date = 0
self.end_date = 0
self.props = None
self.ctx = None
self.commission_rate = 20E-4
self.POSITION_ADJUST_NO = 101010
self.POSITION_ADJUST_TIME = 200000
self.DELIST_ADJUST_NO = 202020
self.DELIST_ADJUST_TIME = 150000
def init_from_config(self, props):
"""
Initialize parameter values for all backtest components such as
DataService, PortfolioManager, Strategy, etc.
Parameters
----------
props : dict
"""
for name in ['start_date', 'end_date']:
if name not in props:
raise ValueError("{} must be provided in props.".format(name))
self.props = props
self.start_date = props.get("start_date")
self.end_date = props.get("end_date")
self.commission_rate = props.get('commission_rate', 20E-4)
if 'symbol' in props:
self.ctx.init_universe(props['symbol'])
elif hasattr(self.ctx, 'dataview'):
self.ctx.init_universe(self.ctx.dataview.symbol)
else:
raise ValueError("No dataview, no symbol either.")
if 'init_balance' not in props:
raise ValueError("No [init_balance] provided. Please specify it in props.")
for obj in ['data_api', 'trade_api', 'pm', 'strategy']:
obj = getattr(self.ctx, obj)
if obj is not None:
obj.init_from_config(props)
'''
class AlphaBacktestInstance_OLD_dataservice(BacktestInstance):
def __init__(self):
BacktestInstance.__init__(self)
self.last_rebalance_date = 0
self.current_rebalance_date = 0
self.trade_days = None
def _is_trade_date(self, start, end, date, data_server):
if self.trade_days is None:
df, msg = data_server.daily('000300.SH', start, end, fields="close")
self.trade_days = df.loc[:, 'trade_date'].values
return date in self.trade_days
def go_next_rebalance_day(self):
"""update self.ctx.trade_date and last_date."""
if self.ctx.gateway.match_finished:
next_period_day = jutil.get_next_period_day(self.ctx.trade_date,
self.ctx.strategy.period, self.ctx.strategy.days_delay)
# update current_date: next_period_day is a workday, but not necessarily a trade date
if self.ctx.calendar.is_trade_date(next_period_day):
self.ctx.trade_date = next_period_day
else:
self.ctx.trade_date = self.ctx.calendar.get_next_trade_date(next_period_day)
self.ctx.trade_date = self.ctx.calendar.get_next_trade_date(next_period_day)
# update re-balance date
if self.current_rebalance_date > 0:
self.last_rebalance_date = self.current_rebalance_date
else:
self.last_rebalance_date = self.start_date
self.current_rebalance_date = self.ctx.trade_date
else:
# TODO here we must make sure the matching will not last to next period
self.ctx.trade_date = self.ctx.calendar.get_next_trade_date(self.ctx.trade_date)
self.last_date = self.ctx.calendar.get_last_trade_date(self.ctx.trade_date)
def run_alpha(self):
gateway = self.ctx.gateway
self.ctx.trade_date = self.start_date
while True:
self.go_next_rebalance_day()
if self.ctx.trade_date > self.end_date:
break
if gateway.match_finished:
self.on_new_day(self.last_date)
df_dic = self.ctx.strategy.get_univ_prices() # access data
self.ctx.strategy.re_balance_plan_before_open(df_dic, suspensions=[])
self.on_new_day(self.ctx.trade_date)
self.ctx.strategy.send_bullets()
else:
self.on_new_day(self.ctx.trade_date)
df_dic = self.ctx.strategy.get_univ_prices() # access data
trade_indications = gateway.match(df_dic, self.ctx.trade_date)
for trade_ind in trade_indications:
self.ctx.strategy.on_trade(trade_ind)
print "Backtest done. {:d} days, {:.2e} trades in total.".format(len(self.trade_days),
len(self.ctx.strategy.pm.trades))
def on_new_day(self, date):
self.ctx.trade_date = date
self.ctx.strategy.on_new_day(date)
self.ctx.gateway.on_new_day(date)
def save_results(self, folder='../output/'):
import pandas as pd
trades = self.ctx.strategy.pm.trades
type_map = {'task_id': str,
'entrust_no': str,
'entrust_action': str,
'symbol': str,
'fill_price': float,
'fill_size': int,
'fill_date': np.integer,
'fill_time': np.integer,
'fill_no': str}
# keys = trades[0].__dict__.keys()
ser_list = dict()
for key in type_map.keys():
v = [t.__getattribute__(key) for t in trades]
ser = pd.Series(data=v, index=None, dtype=type_map[key], name=key)
ser_list[key] = ser
df_trades = pd.DataFrame(ser_list)
df_trades.index.name = 'index'
from os.path import join
trades_fn = join(folder, 'trades.csv')
configs_fn = join(folder, 'configs.json')
fileio.create_dir(trades_fn)
df_trades.to_csv(trades_fn)
fileio.save_json(self.props, configs_fn)
print ("Backtest results has been successfully saved to:\n" + folder)
'''
class AlphaBacktestInstance(BacktestInstance):
"""
Backtest alpha strategy using DataView.
Attributes
----------
last_date : int
Last trade date before current trade date.
last_rebalance_date : int
The date of the previous re-balance.
current_rebalance_date : int
The date of the current re-balance.
univ_price_dic : dict
Prices of symbols on current trade date.
commission_rate : float
Ratio of commission charged to turnover for each trade.
"""
def __init__(self):
super(AlphaBacktestInstance, self).__init__()
self.last_date = 0
self.last_rebalance_date = 0
self.current_rebalance_date = 0
self.univ_price_dic = {}
self.tmp_univ_price_dic_map = {}
def init_from_config(self, props):
super(AlphaBacktestInstance, self).init_from_config(props)
strategy = self.ctx.strategy
# universe = props.get('universe', "")
# symbol = props.get('symbol', "")
# if symbol and universe or len(universe.split('.')) > 1:
# if strategy.pc_method in['index_weight', 'equal_index_weight']:
# raise ValueError("{} shouldn't be used if there are both symbol and universe in props", strategy.pc_method)
def position_adjust(self):
"""
adjust happens after market close
Before each re-balance day, adjust for all dividend and cash paid actions during the last period.
We assume all cash will be re-invested.
Since we adjust our position at next re-balance day, PnL before that may be incorrect.
"""
start = self.last_rebalance_date # start will be one day later
end = self.current_rebalance_date # end is the same to ensure position adjusted for dividend on rebalance day
df_adj = self.ctx.dataview.get_ts('_daily_adjust_factor', start_date=start, end_date=end)
# FIXME: the first day should have been balanced before?
df_adj = df_adj[1:]
pm = self.ctx.pm
# Find symbols which have adj_factor not equal to 1
tmp = df_adj[df_adj!=1].fillna(0.0).sum()
adj_symbols = set(tmp[tmp!=0].index).intersection(pm.holding_securities)
#for symbol in pm.holding_securities:
for symbol in adj_symbols:
ser = df_adj.loc[:, symbol]
ser_adj = ser.dropna()
for date, ratio in ser_adj.iteritems():
pos_old = pm.get_position(symbol).current_size
# TODO pos will become float, original: int
pos_new = pos_old * ratio
pos_diff = pos_new - pos_old # must be positive
if pos_diff <= 0:
# TODO this is possible
# raise ValueError("pos_diff <= 0")
continue
trade_ind = Trade()
trade_ind.symbol = symbol
trade_ind.task_id = self.POSITION_ADJUST_NO
trade_ind.entrust_no = self.POSITION_ADJUST_NO
trade_ind.entrust_action = common.ORDER_ACTION.BUY # for now only BUY
trade_ind.set_fill_info(price=0.0, size=pos_diff,
date=date, time=200000,
no=self.POSITION_ADJUST_NO,
trade_date=date)
self.ctx.strategy.on_trade(trade_ind)
def delist_adjust(self):
df_inst = self.ctx.dataview.data_inst
start = self.last_rebalance_date # start will be one day later
end = self.current_rebalance_date # end is the same to ensure position adjusted for dividend on rebalance day
mask = np.logical_and(df_inst['delist_date'] >= start, df_inst['delist_date'] <= end)
dic_inst = df_inst.loc[mask, :].to_dict(orient='index')
if not dic_inst:
return
pm = self.ctx.pm
for symbol in pm.holding_securities.copy():
value_dic = dic_inst.get(symbol, None)
if value_dic is None:
continue
pos = pm.get_position(symbol).current_size
last_trade_date = self._get_last_trade_date(value_dic['delist_date'])
last_close_price = self.ctx.dataview.get_snapshot(last_trade_date, symbol=symbol, fields='close')
last_close_price = last_close_price.at[symbol, 'close']
trade_ind = Trade()
trade_ind.symbol = symbol
trade_ind.task_id = self.DELIST_ADJUST_NO
trade_ind.entrust_no = self.DELIST_ADJUST_NO
trade_ind.entrust_action = common.ORDER_ACTION.SELL # for now only BUY
trade_ind.set_fill_info(price=last_close_price, size=pos,
date=last_trade_date, time=150000,
no=self.DELIST_ADJUST_NO,
trade_date=last_trade_date)
self.ctx.strategy.cash += trade_ind.fill_price * trade_ind.fill_size
#self.ctx.pm.cash += trade_ind.fill_price * trade_ind.fill_size
self.ctx.strategy.on_trade(trade_ind)
def re_balance_plan_before_open(self):
"""
Plan the portfolio re-balance before market open (suspensions are not yet known); only weights are calculated here.
For now, we stick to the same close price when calculate market value and do re-balance.
Parameters
----------
"""
# Step.1 set weights of those non-index-members to zero
# only filter index members when universe is defined
universe_list = self.ctx.universe
if self.ctx.dataview.universe:
df_is_member = self.ctx.dataview.get_snapshot(self.ctx.trade_date, fields='index_member')
df_is_member = df_is_member.fillna(0).astype(bool)
universe_list = df_is_member[df_is_member['index_member']].index.values
# Step.2 filter out those not listed or already de-listed
df_inst = self.ctx.dataview.data_inst
mask = np.logical_and(self.ctx.trade_date > df_inst['list_date'],
self.ctx.trade_date < df_inst['delist_date'])
listing_symbols = df_inst.loc[mask, :].index.values
universe_list = np.intersect1d(universe_list, listing_symbols)
# step.3 construct portfolio using models
self.ctx.strategy.portfolio_construction(universe_list)
def re_balance_plan_after_open(self):
"""
Do portfolio re-balance after market open.
With suspensions known, we re-calculate weights and generate orders.
Notes
-----
Price here must not be adjusted.
"""
prices = {k: v['vwap'] for k, v in self.univ_price_dic.items()}
# suspensions & limit_reaches: list of str
suspensions = self.get_suspensions()
limit_reaches = self.get_limit_reaches()
all_list = reduce(lambda s1, s2: s1.union(s2), [set(suspensions), set(limit_reaches)])
# step1. weights of suspended and limit-reached symbols will be removed, and weights of the others will be re-normalized
self.ctx.strategy.re_weight_suspension(all_list)
# step2. calculate market value and cash
# market value does not include those suspended
market_value_float, market_value_frozen = self.ctx.pm.market_value(prices, all_list)
#cash_available = self.ctx.pm.cash + market_value_float
cash_available = self.ctx.strategy.cash + market_value_float
cash_to_use = cash_available * self.ctx.strategy.position_ratio
cash_unuse = cash_available - cash_to_use
# step3. generate target positions
# position of those suspended will remain the same (will not be traded)
goals, cash_remain = self.ctx.strategy.generate_weights_order(self.ctx.strategy.weights, cash_to_use, prices,
suspensions=all_list)
self.ctx.strategy.goal_positions = goals
#self.ctx.pm.cash = cash_remain + cash_unuse
self.ctx.strategy.cash = cash_remain + cash_unuse
#print("cash diff: ", self.ctx.pm.cash - self.ctx.strategy.cash)
# self.liquidate_all()
total = cash_available + market_value_frozen
self.ctx.strategy.on_after_rebalance(total)
self.ctx.record('total_cash', total)
def run_alpha(self):
print("Run alpha backtest from {0} to {1}".format(self.start_date, self.end_date))
begin_time = dt.datetime.now()
tapi = self.ctx.trade_api
# Keep compatible, the original test starts with next day, not start_date
if self.ctx.strategy.period in ['week', 'month']:
self.ctx.trade_date = self._get_first_period_day()
else:
self.ctx.trade_date = self._get_next_trade_date(self.start_date)
self.last_date = self._get_last_trade_date(self.ctx.trade_date)
self.current_rebalance_date = self.ctx.trade_date
while True:
print("=======new day {}".format(self.ctx.trade_date))
# match outstanding (unfilled) orders, or re-balance
if tapi.match_finished:
# Step1.
# position adjust according to dividend, cash paid, de-list actions during the last period
# two adjust must in order
self.position_adjust()
self.delist_adjust()
# Step2.
# plan re-balance before market open of the re-balance day:
# use last trade date because strategy can only access data of last day
self.on_new_day(self.last_date)
# get index members, get signals, generate weights
self.re_balance_plan_before_open()
# Step3.
# do re-balance on the re-balance day
self.on_new_day(self.ctx.trade_date)
# get suspensions, get up/down limits, generate goal positions and send orders.
self.re_balance_plan_after_open()
self.ctx.strategy.send_bullets()
else:
self.on_new_day(self.ctx.trade_date)
# Deal with trade indications
# results = gateway.match(self.univ_price_dic)
results = tapi.match_and_callback(self.univ_price_dic)
for trade_ind, order_status_ind in results:
self.ctx.strategy.cash -= trade_ind.commission
#self.ctx.pm.cash -= trade_ind.commission
self.on_after_market_close()
# switch trade date
backtest_finish = self.go_next_rebalance_day()
if backtest_finish:
break
used_time = (dt.datetime.now() - begin_time).total_seconds()
print("Backtest done. {0:d} days, {1:.2e} trades in total. used time: {2}s".
format(len(self.ctx.dataview.dates), len(self.ctx.pm.trades), used_time))
#jutil.prof_print()
def on_after_market_close(self):
self.ctx.trade_api.on_after_market_close()
'''
def get_univ_prices(self, field_name='close'):
dv = self.ctx.dataview
df = dv.get_snapshot(self.ctx.trade_date, fields=field_name)
res = df.to_dict(orient='index')
return res
'''
def _is_trade_date(self, date):
if self.ctx.dataview is not None:
return date in self.ctx.dataview.dates
else:
return self.ctx.data_api.is_trade_date(date)
def _get_next_trade_date(self, date, n=1):
if self.ctx.dataview is not None:
dates = self.ctx.dataview.dates
mask = dates > date
return dates[mask][n-1]
else:
return self.ctx.data_api.query_next_trade_date(date, n)
def _get_last_trade_date(self, date):
if self.ctx.dataview is not None:
dates = self.ctx.dataview.dates
mask = dates < date
return dates[mask][-1]
else:
return self.ctx.data_api.query_last_trade_date(date)
def _get_first_period_day(self):
current = self.start_date
current_date = jutil.convert_int_to_datetime(current)
period = self.ctx.strategy.period
# set current date to first date of current period
# set offset to first date of previous period
if period == 'day':
offset = pd.tseries.offsets.BDay()
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0)
current_date -= pd.tseries.offsets.Day(current_date.weekday())
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin()
current_date -= pd.tseries.offsets.Day(current_date.day - 1)
else:
raise NotImplementedError("Frequency as {} not support".format(period))
current_date -= offset * self.ctx.strategy.n_periods
current = jutil.convert_datetime_to_int(current_date)
return self._get_next_period_day(current, self.ctx.strategy.period,
n=self.ctx.strategy.n_periods,
extra_offset=self.ctx.strategy.days_delay)
def _get_next_period_day(self, current, period, n=1, extra_offset=0):
"""
Get the n'th day in next period from current day.
Parameters
----------
current : int
Current date in format "%Y%m%d".
period : str
Interval between current and next. {'day', 'week', 'month'}
n : int
n times period.
extra_offset : int
n'th business day after next period.
Returns
-------
nxt : int
"""
#while True:
#for _ in range(4):
current_date = jutil.convert_int_to_datetime(current)
while current <= self.end_date:
if period == 'day':
offset = pd.tseries.offsets.BDay() # move to next business day
if extra_offset < 0:
raise ValueError("Wrong offset for day period")
elif period == 'week':
offset = pd.tseries.offsets.Week(weekday=0) # move to next Monday
if extra_offset < -5 :
raise ValueError("Wrong offset for week period")
elif period == 'month':
offset = pd.tseries.offsets.BMonthBegin() # move to first business day of next month
if extra_offset < -31:
raise ValueError("Wrong offset for month period")
else:
raise NotImplementedError("Frequency as {} not support".format(period))
offset = offset * n
begin_date = current_date + offset
if period == 'day':
end_date = begin_date + pd.tseries.offsets.Day()*366
elif period == 'week':
end_date = begin_date + pd.tseries.offsets.Day() * 6
elif period == 'month':
end_date = begin_date + pd.tseries.offsets.BMonthBegin() #- pd.tseries.offsets.BDay()
if extra_offset > 0 :
next_date = begin_date + extra_offset * pd.tseries.offsets.BDay()
if next_date >= end_date:
next_date = end_date - pd.tseries.offsets.BDay()
elif extra_offset < 0:
next_date = end_date + extra_offset * | pd.tseries.offsets.BDay() | pandas.tseries.offsets.BDay |
import datetime as dt
import pandas as pd
import numpy as np
import re
# Begin User Input Data
report_date = dt.datetime(2020, 8, 31)
wscf_market_value = 194719540.46
aqr_market_value = 182239774.63
delaware_market_value = 151551731.17
wellington_market_value = 149215529.22
qic_cash_market_value = 677011299.30
input_directory = 'U:/'
output_directory = 'U:/'
jpm_filepath = input_directory + 'CIO/#Data/input/jpm/holdings/2020/08/Priced Positions - All.csv'
wscf_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wscf_holdings.xlsx'
aqr_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/aqr_holdings.xls'
delaware_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/delaware_holdings.xlsx'
wellington_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wellington_holdings.xlsx'
qic_cash_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/07/qic_cash_holdings.xlsx'
tickers_filepath = input_directory + 'CIO/#Holdings/Data/input/tickers/tickers_201909.xlsx'
asx_filepath = input_directory + 'CIO/#Data/input/asx/ASX300/20200501-asx300.csv'
aeq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_AEQ_Manager Version.xlsx'
ieq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_IEQ_Manager Version.xlsx'
aeq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/aeq_exclusions_' + str(report_date.date()) + '.csv'
ieq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/ieq_exclusions_' + str(report_date.date()) + '.csv'
# End User Input Data
# Account Name to LGS Name dictionary
australian_equity_managers_dict = {
'LGS AUSTRALIAN EQUITIES - BLACKROCK': 'BlackRock',
'LGS AUSTRALIAN EQUITIES - ECP': 'ECP',
'LGS AUSTRALIAN EQUITIES DNR CAPITAL': 'DNR',
'LGS AUSTRALIAN EQUITIES - PENDAL': 'Pendal',
'LGS AUSTRALIAN EQUITIES - SSGA': 'SSGA',
'LGS AUSTRALIAN EQUITIES - UBIQUE': 'Ubique',
'LGS AUSTRALIAN EQUITIES - WSCF': 'First Sentier',
'LGS AUSTRALIAN EQUITIES REBALANCE': 'Rebalance',
'LGS AUST EQUITIES - ALPHINITY': 'Alphinity'
}
international_equity_managers_dict = {
'LGS INTERNATIONAL EQUITIES - WCM': 'WCM',
'LGS INTERNATIONAL EQUITIES - AQR': 'AQR',
'LGS INTERNATIONAL EQUITIES - HERMES': 'Hermes',
'LGS INTERNATIONAL EQUITIES - IMPAX': 'Impax',
'LGS INTERNATIONAL EQUITIES - LONGVI EW': 'Longview',
'LGS INTERNATIONAL EQUITIES - LSV': 'LSV',
'LGS INTERNATIONAL EQUITIES - MFS': 'MFS',
'LGS INTERNATIONAL EQUITIES - MACQUARIE': 'Macquarie',
'LGS INTERNATIONAL EQUITIES - WELLINGTON': 'Wellington',
'LGS GLOBAL LISTED PROPERTY - RESOLUTION': 'Resolution',
}
# Imports JPM Mandates holdings data
df_jpm = pd.read_csv(
jpm_filepath,
skiprows=[0, 1, 2, 3],
header=0,
usecols=[
'Account Number',
'Account Name',
'Security ID',
'ISIN',
'Security Name',
'Asset Type Description',
'Price Date',
'Market Price',
'Total Units',
'Total Market Value (Local)',
'Total Market Value (Base)',
'Local Currency'
],
parse_dates=['Price Date'],
infer_datetime_format=True
)
# Renames the columns into LGS column names
df_jpm = df_jpm.rename(
columns={
'Security ID': 'SEDOL',
'Asset Type Description': 'Asset Type',
'Price Date': 'Date',
'Market Price': 'Purchase Price Local',
'Total Units': 'Quantity',
'Total Market Value (Local)': 'Market Value Local',
'Total Market Value (Base)': 'Market Value AUD',
'Local Currency': 'Currency'
}
)
df_jpm['Purchase Price AUD'] = df_jpm['Market Value AUD'] / df_jpm['Quantity']
# Imports WSCF holdings data
df_wscf = pd.read_excel(
pd.ExcelFile(wscf_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 8],
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Name',
'Unit Holdings',
'Market Value (Local Currency)',
'Market Value (Base Currency)',
'Security Currency'
]
)
# Renames the columns into LGS column names
df_wscf = df_wscf.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Unit Holdings': 'Quantity',
'Market Value (Local Currency)': 'Market Value Local',
'Market Value (Base Currency)': 'Market Value AUD',
'Security Currency': 'Currency'
}
)
# Scales holdings by market value
wscf_scaling_factor = wscf_market_value/df_wscf['Market Value AUD'].sum()
df_wscf['Market Value Local'] = wscf_scaling_factor * df_wscf['Market Value Local']
df_wscf['Market Value AUD'] = wscf_scaling_factor * df_wscf['Market Value AUD']
df_wscf['Quantity'] = wscf_scaling_factor * df_wscf['Quantity']
df_wscf['Purchase Price Local'] = df_wscf['Market Value Local'] / df_wscf['Quantity']
df_wscf['Purchase Price AUD'] = df_wscf['Market Value AUD'] / df_wscf['Quantity']
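# Worked example of the scaling above (hypothetical numbers): if the reported 'Market Value AUD'
# column sums to 100m while wscf_market_value is ~194.7m, the scaling factor is ~1.95, so all
# market values and unit holdings are grossed up by that factor before unit prices are re-derived.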
df_wscf['Account Number'] = 'WSCF'
df_wscf['Account Name'] = 'LGS AUSTRALIAN EQUITIES - WSCF'
df_wscf['Date'] = report_date
df_wscf['Asset Type'] = np.nan
# Imports AQR holdings data
df_aqr = pd.read_excel(
pd.ExcelFile(aqr_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7],
header=0,
usecols=[
'Sedol',
'Isin',
'Investment Description',
'Asset Type',
'Price Local',
'Base Price',
'Quantity',
'MV Local',
'MV Base',
'Ccy'
]
)
# Renames the columns into LGS column names
df_aqr = df_aqr.rename(
columns={
'Sedol': 'SEDOL',
'Isin': 'ISIN',
'Investment Description': 'Security Name',
'Price Local': 'Purchase Price Local',
'Base Price': 'Purchase Price AUD',
'MV Local': 'Market Value Local',
'MV Base': 'Market Value AUD',
'Ccy': 'Currency'
}
)
# Scales holdings by market value
aqr_scaling_factor = aqr_market_value/df_aqr['Market Value AUD'].sum()
df_aqr['Market Value Local'] = aqr_scaling_factor * df_aqr['Market Value Local']
df_aqr['Market Value AUD'] = aqr_scaling_factor * df_aqr['Market Value AUD']
df_aqr['Quantity'] = aqr_scaling_factor * df_aqr['Quantity']
df_aqr['Account Number'] = 'AQR'
df_aqr['Account Name'] = 'LGS INTERNATIONAL EQUITIES - AQR'
df_aqr['Date'] = report_date
# Imports Delaware holdings data
df_delaware = pd.read_excel(
pd.ExcelFile(delaware_filepath),
sheet_name='EM SICAV holdings 7-31-2020',
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Description (Short)',
'Position Date',
'Shares/Par',
'Trading Currency',
'Traded Market Value (Local)',
'Traded Market Value (AUD)'
]
)
# Renames the columns into LGS column names
df_delaware = df_delaware.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Security Description (Short)': 'Security Name',
'Position Date': 'Date',
'Shares/Par': 'Quantity',
'Trading Currency': 'Currency',
'Traded Market Value (Local)': 'Market Value Local',
'Traded Market Value (AUD)': 'Market Value AUD'
}
)
# Scales holdings by market value
delaware_scaling_factor = delaware_market_value/df_delaware['Market Value AUD'].sum()
df_delaware['Market Value Local'] = delaware_scaling_factor * df_delaware['Market Value Local']
df_delaware['Market Value AUD'] = delaware_scaling_factor * df_delaware['Market Value AUD']
df_delaware['Quantity'] = delaware_scaling_factor * df_delaware['Quantity']
df_delaware['Purchase Price Local'] = df_delaware['Market Value Local'] / df_delaware['Quantity']
df_delaware['Purchase Price AUD'] = df_delaware['Market Value AUD'] / df_delaware['Quantity']
df_delaware['Account Number'] = 'MACQUARIE'
df_delaware['Account Name'] = 'LGS INTERNATIONAL EQUITIES - MACQUARIE'
df_delaware['Date'] = report_date
# Imports Wellington holdings data
df_wellington = pd.read_excel(
pd.ExcelFile(wellington_filepath),
sheet_name='wellington_holdings',
header=0,
usecols=[
'SEDOL',
'ISIN',
'Security',
'Shares or Par Value',
'ISO Code',
'Market Value (Local)',
'Market Value (Report Currency)'
]
)
# Renames the columns into LGS column names
df_wellington = df_wellington.rename(
columns={
'Security': 'Security Name',
'Shares or Par Value': 'Quantity',
'ISO Code': 'Currency',
'Market Value (Local)': 'Market Value Local',
'Market Value (Report Currency)': 'Market Value AUD'
}
)
# Scales holdings by market value
wellington_scaling_factor = wellington_market_value/df_wellington['Market Value AUD'].sum()
df_wellington['Market Value Local'] = wellington_scaling_factor * df_wellington['Market Value Local']
df_wellington['Market Value AUD'] = wellington_scaling_factor * df_wellington['Market Value AUD']
df_wellington['Quantity'] = wellington_scaling_factor * df_wellington['Quantity']
df_wellington['Purchase Price Local'] = df_wellington['Market Value Local'] / df_wellington['Quantity']
df_wellington['Purchase Price AUD'] = df_wellington['Market Value AUD'] / df_wellington['Quantity']
df_wellington['Account Number'] = 'WELLINGTON'
df_wellington['Account Name'] = 'LGS INTERNATIONAL EQUITIES - WELLINGTON'
df_wellington['Date'] = report_date
df_qic_cash = pd.read_excel(
pd.ExcelFile(qic_cash_filepath),
sheet_name='Risk and Exposure',
header=4,
usecols=[
'ISIN',
'Security Description',
'Security Type',
'Currency',
'Market Value %'
]
)
df_qic_cash = df_qic_cash.rename(
columns={
'Security Description': 'Security Name',
'Security Type': 'Asset Type'
}
)
df_qic_cash['Market Value Local'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Market Value AUD'] = df_qic_cash['Market Value %'] * qic_cash_market_value
df_qic_cash['Quantity'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Purchase Price Local'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Purchase Price AUD'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Account Number'] = 'QIC Cash'
df_qic_cash['Account Name'] = 'LGS CASH - QIC CASH'
df_qic_cash['Date'] = report_date
df_qic_cash = df_qic_cash.drop(columns=['Market Value %'], axis=1)
df_qic_cash = df_qic_cash[~df_qic_cash['Security Name'].isin([np.nan])].reset_index(drop=True)
# Joins all the dataframes
df_main = pd.concat([df_jpm, df_wscf, df_aqr, df_delaware, df_wellington], axis=0, sort=True).reset_index(drop=True)
# Outputs all of the holdings
df_main_all = df_main.copy()
df_main_all = df_main_all.drop(['Date'], axis=1)
df_main_all.to_csv(output_directory + 'CIO/#Data/output/holdings/all_holdings.csv', index=False)
# <NAME> Spreadsheet
df_cp = df_main_all[['Account Name', 'Security Name', 'Market Value AUD']]
df_cp.to_csv(output_directory + 'CIO/#Data/output/holdings/craigpete.csv', index=False)
# Selects Australian Equity and International Equity managers only JANA
df_main_all_aeq = df_main_all[df_main_all['Account Name'].isin(australian_equity_managers_dict)].reset_index(drop=True)
df_main_all_ieq = df_main_all[df_main_all['Account Name'].isin(international_equity_managers_dict)].reset_index(drop=True)
# Writes to excel file for JANA
writer = | pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/jana/aeq_holdings.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
import json
import sys
print(sys.executable, sys.version)
import numpy as np
import pandas as pd
from os.path import join, abspath
# for local import
sys.path.append(abspath('..'))
from main.config import Config
from main.model import Model
mapping = {
'mpii_3d': 'mpi',
'h36m_filtered': 'h36m',
'h36m': 'h36m',
'total_cap': 'TC'
}
def eval_per_sequence(sequences, all_kp3d_mpjpe, all_kp3d_mpjpe_aligned):
# sort sequence
eval_dict = {}
for i, sequence in zip(range(len(sequences)), sequences):
sequence = sequence.decode('utf-8')
if ' 2_' in sequence or ' 3_' in sequence: # ignore camera 2 and 3 for mpii 3d
continue
mpjpe_tuple = tuple([all_kp3d_mpjpe_aligned[i], all_kp3d_mpjpe[i]])
if sequence not in eval_dict:
eval_dict[sequence] = [mpjpe_tuple]
else:
eval_dict[sequence].append(mpjpe_tuple)
seq_names, data = eval_dict.keys(), np.ndarray(shape=(len(eval_dict), 9), dtype=np.float32)
for i, value in zip(range(len(seq_names)), eval_dict.values()):
values_per_seq = np.asarray(value, dtype=np.float32)
mpjpe_aligned = values_per_seq[:, 0, :]
mpjpe = values_per_seq[:, 1, :]
mean_mpjpe_aligned = np.mean(mpjpe_aligned)
mean_mpjpe = np.mean(mpjpe)
data[i, 0] = mean_mpjpe_aligned # mean_error_aligned
data[i, 1] = mean_mpjpe # mean_error
data[i, 2] = np.median(mpjpe_aligned) # median_error
data[i, 3] = np.std(mpjpe_aligned) # standard deviation
data[i, 4] = mpjpe_aligned.min() # min
data[i, 5] = mpjpe_aligned.max() # max
data[i, 6] = np.percentile(mpjpe_aligned, 25) # 25 percentile
data[i, 7] = np.percentile(mpjpe_aligned, 50) # 50 percentile
data[i, 8] = np.percentile(mpjpe_aligned, 75) # 75 percentile
columns = ['Mean Aligned', 'Mean', 'Median', 'Standard Deviation', 'Min', 'Max', '25%', '50%', '75%']
df_seq = | pd.DataFrame(data, index=seq_names, columns=columns) | pandas.DataFrame |
from datetime import timedelta
from os import linesep
import pandas as pd
import pytest
from pykusto import PyKustoClient, Order, Nulls, JoinKind, Distribution, BagExpansion, column_generator as col, Functions as f, Query, JoinException
# noinspection PyProtectedMember
from pykusto._src.type_utils import _KustoType
from test.test_base import TestBase, mock_databases_response, MockKustoClient, mock_response
from test.test_base import mock_table as t, mock_columns_response
from test.udf import func, STRINGIFIED
class TestQuery(TestBase):
def test_sanity(self):
# test concatenation #
self.assertEqual(
"mock_table | where numField > 4 | take 5 | sort by stringField asc nulls last",
Query(t).where(t.numField > 4).take(5).sort_by(t.stringField, Order.ASC, Nulls.LAST).render(),
)
def test_add_queries(self):
query_a = Query(t).where(t.numField > 4)
query_b = Query(t).take(5)
query_c = Query(t).where(t.numField2 > 1).sort_by(t.stringField, Order.ASC, Nulls.LAST)
query = query_a + query_b + query_c
self.assertEqual(
"mock_table | where numField > 4 | take 5 | where numField2 > 1 | sort by stringField asc nulls last",
query.render(),
)
# make sure the originals didn't change
self.assertEqual(
"mock_table | where numField > 4",
query_a.render(),
)
self.assertEqual(
"mock_table | take 5",
query_b.render(),
)
self.assertEqual(
"mock_table | where numField2 > 1 | sort by stringField asc nulls last",
query_c.render(),
)
def test_add_queries_with_table(self):
table = PyKustoClient(MockKustoClient(columns_response=mock_columns_response([('numField', _KustoType.INT)])))['test_db']['mock_table']
query_a = Query(table).where(table.numField > 4)
query_b = Query(t).take(5).take(2).sort_by(t.stringField, Order.ASC, Nulls.LAST)
query = query_a + query_b
self.assertEqual(
"mock_table | where numField > 4 | take 5 | take 2 | sort by stringField asc nulls last",
query.render(),
)
# make sure the originals didn't change
self.assertEqual(
"mock_table | where numField > 4",
query_a.render(),
)
self.assertEqual(
"mock_table | take 5 | take 2 | sort by stringField asc nulls last",
query_b.render(),
)
def test_add_queries_with_table_name(self):
query_a = Query('mock_table').where(col.numField > 4)
query_b = Query().take(5)
query = query_a + query_b
self.assertEqual(
"mock_table | where numField > 4 | take 5",
query.render(),
)
self.assertEqual(
"mock_table",
query.get_table_name(),
)
# make sure the originals didn't change
self.assertEqual(
"mock_table | where numField > 4",
query_a.render(),
)
self.assertEqual(
"mock_table",
query_a.get_table_name(),
)
self.assertEqual(
" | take 5",
query_b.render(),
)
self.assertEqual(
None,
query_b.get_table_name(),
)
def test_pretty_render(self):
query = Query('mock_table').where(col.numField > 4).take(5)
self.assertEqual(
"mock_table" + linesep +
"| where numField > 4" + linesep +
"| take 5",
query.pretty_render(),
)
def test_where(self):
self.assertEqual(
"mock_table | where numField > 4",
Query(t).where(t.numField > 4).render(),
)
def test_where_multiple_predicates(self):
self.assertEqual(
'mock_table | where boolField and (numField > numField2) and (stringField contains "hello")',
Query(t).where(t.boolField, t.numField > t.numField2, t.stringField.contains('hello')).render(),
)
def test_where_no_predicates(self):
self.assertEqual(
'mock_table | project numField',
Query(t).where().project(t.numField).render(),
)
def test_where_true_predicate(self):
self.assertEqual(
'mock_table | where boolField | project numField',
Query(t).where(t.boolField, True).project(t.numField).render(),
)
def test_where_only_true_predicate(self):
self.assertEqual(
'mock_table | project numField',
Query(t).where(True).project(t.numField).render(),
)
def test_where_false_predicate(self):
self.assertEqual(
'mock_table | where false | project numField',
Query(t).where(t.boolField, False).project(t.numField).render(),
)
def test_where_not(self):
self.assertEqual(
"mock_table | where not(boolField)",
Query(t).where(f.not_of(t.boolField)).render(),
)
def test_take(self):
self.assertEqual(
"mock_table | take 3",
Query(t).take(3).render(),
)
def test_sort(self):
self.assertEqual(
"mock_table | sort by numField desc nulls first",
Query(t).sort_by(t.numField, order=Order.DESC, nulls=Nulls.FIRST).render(),
)
def test_order(self):
self.assertEqual(
"mock_table | sort by numField desc nulls first",
Query(t).order_by(t.numField, order=Order.DESC, nulls=Nulls.FIRST).render(),
)
def test_order_expression_in_arg(self):
self.assertEqual(
"mock_table | sort by strlen(stringField) desc nulls first",
Query(t).order_by(f.strlen(t.stringField), order=Order.DESC, nulls=Nulls.FIRST).render(),
)
def test_sort_multiple_cols(self):
self.assertEqual(
"mock_table | sort by stringField desc nulls first, numField asc nulls last",
Query(t).sort_by(t.stringField, order=Order.DESC, nulls=Nulls.FIRST).then_by(t.numField, Order.ASC, Nulls.LAST).render(),
)
def test_no_params_for_sort(self):
self.assertEqual(
"mock_table | sort by numField, stringField",
Query(t).sort_by(t.numField).then_by(t.stringField).render(),
)
self.assertEqual(
"mock_table | sort by numField desc nulls first, stringField",
Query(t).sort_by(t.numField, order=Order.DESC, nulls=Nulls.FIRST).then_by(t.stringField).render(),
)
def test_top(self):
self.assertEqual(
"mock_table | top 3 by numField desc nulls first",
Query(t).top(3, t.numField, order=Order.DESC, nulls=Nulls.FIRST).render(),
)
def test_join_with_table(self):
table = PyKustoClient(MockKustoClient(columns_response=mock_columns_response([('tableStringField', _KustoType.STRING), ('numField', _KustoType.INT)])))['test_db'][
'mock_table']
self.assertEqual(
'mock_table | where numField > 4 | take 5 | join kind=inner (cluster("test_cluster.kusto.windows.net").database("test_db").table("mock_table")) '
'on numField, $left.stringField==$right.tableStringField',
(
Query(t)
.where(t.numField > 4).take(5)
.join(Query(table), kind=JoinKind.INNER)
.on(t.numField, (t.stringField, table.tableStringField))
.render()
)
)
def test_join_with_table_and_query(self):
table = PyKustoClient(MockKustoClient(columns_response=mock_columns_response([
('tableStringField', _KustoType.STRING), ('numField', _KustoType.INT)
])))['test_db']['mock_table']
self.assertEqual(
'mock_table | where numField > 4 | take 5 | join kind=inner (cluster("test_cluster.kusto.windows.net").database("test_db").table("mock_table") | where numField == 2 '
'| take 6) on numField, $left.stringField==$right.tableStringField',
(
Query(t)
.where(t.numField > 4)
.take(5)
.join(Query(table).where(table.numField == 2).take(6), kind=JoinKind.INNER)
.on(t.numField, (t.stringField, table.tableStringField))
.render()
)
)
def test_join_chained_on(self):
mock_client = PyKustoClient(
MockKustoClient(
columns_response=mock_columns_response(
[('tableStringField', _KustoType.STRING), ('numField', _KustoType.INT)]
)
)
)
mock_table = mock_client['test_db']['mock_table']
expected_query = (
'mock_table | where numField > 4 | take 5 | join kind=inner '
'(cluster("test_cluster.kusto.windows.net").database("test_db").table("mock_table")'
' | where numField == 2 | take 6) on numField, $left.stringField==$right.tableStringField'
)
actual_query = (
Query(t)
.where(t.numField > 4)
.take(5)
.join(Query(mock_table).where(mock_table.numField == 2).take(6), kind=JoinKind.INNER)
.on(t.numField)
.on((t.stringField, mock_table.tableStringField))
.render()
)
self.assertEqual(expected_query, actual_query)
def test_join_no_joined_table(self):
self.assertRaises(
JoinException("The joined query must have a table"),
lambda: Query(t).where(t.numField > 4).take(5).join(Query().take(2), kind=JoinKind.INNER).on(t.numField, (t.stringField, t.stringField2)).render()
)
def test_join_no_on(self):
self.assertRaises(
JoinException("A call to join() must be followed by a call to on()"),
Query(t).where(t.numField > 4).take(5).join(
Query(t).take(2), kind=JoinKind.INNER).render
)
@pytest.mark.skip(reason="Re-enable once this is resoled: https://github.com/agronholm/typeguard/issues/159")
def test_join_wrong_arguments_type(self):
col_name_str = "numField"
# noinspection PyTypeChecker
self.assertRaises(
JoinException(
"A join argument could be a column, or a tuple of two columns corresponding to the input and join "
f"tables column names. However, the join argument provided is {col_name_str} of type {type(col_name_str)}"
),
lambda: (
Query(t)
.where(t.numField > 4)
.take(5)
.join(Query(t).take(2), kind=JoinKind.INNER)
.on(col_name_str)
.render()
)
)
def test_extend(self):
self.assertEqual(
"mock_table | extend sumField = numField + numField2, foo = numField3 * 4 | take 5",
Query(t).extend((t.numField + t.numField2).assign_to(col.sumField), foo=t.numField3 * 4).take(5).render(),
)
def test_extend_assign_to_multiple_columns(self):
self.assertEqual(
"mock_table | extend (newField1, newField2) = arrayField, shoo = numField * 4",
Query(t).extend(t.arrayField.assign_to(col.newField1, col.newField2), shoo=t.numField * 4).render(),
)
def test_extend_assign_non_array_to_multiple_columns(self):
self.assertRaises(
ValueError("Only arrays can be assigned to multiple columns"),
lambda: t.stringField.assign_to(col.newField1, col.newField2),
)
def test_extend_generate_column_name(self):
self.assertEqual(
"mock_table | extend numField + numField2, foo = numField3 * 4",
Query(t).extend(t.numField + t.numField2, foo=t.numField3 * 4).render(),
)
def test_extend_build_dynamic(self):
self.assertEqual(
'mock_table | extend foo = pack("Name", stringField, "Roles", pack_array(stringField2, stringField3))',
Query(t).extend(foo={'Name': t.stringField, 'Roles': [t.stringField2, t.stringField3]}).render(),
)
def test_summarize(self):
self.assertEqual(
"mock_table | summarize count(stringField), my_count = count(stringField2)",
Query(t).summarize(f.count(t.stringField), my_count=f.count(t.stringField2)).render(),
)
def test_summarize_by(self):
self.assertEqual(
"mock_table | summarize count(stringField), my_count = count(stringField2) by boolField, bin(numField, 1), time_range = bin(dateField, time(0.0:0:10.0))",
Query(t).summarize(
f.count(t.stringField),
my_count=f.count(t.stringField2)
).by(t.boolField, f.bin(t.numField, 1), time_range=f.bin(t.dateField, timedelta(seconds=10))).render(),
)
def test_summarize_by_expression(self):
self.assertEqual(
"mock_table | summarize count(stringField) by tostring(mapField)",
Query(t).summarize(f.count(t.stringField)).by(f.to_string(t.mapField)).render(),
)
def test_mv_expand(self):
self.assertEqual(
"mock_table | mv-expand arrayField, arrayField2, arrayField3",
Query(t).mv_expand(t.arrayField, t.arrayField2, t.arrayField3).render(),
)
def test_mv_expand_assign(self):
self.assertEqual(
"mock_table | mv-expand expanded_field = arrayField",
Query(t).mv_expand(expanded_field=t.arrayField).render(),
)
def test_mv_expand_assign_to(self):
self.assertEqual(
"mock_table | mv-expand expanded_field = arrayField",
Query(t).mv_expand(t.arrayField.assign_to(col.expanded_field)).render(),
)
def test_mv_expand_assign_to_with_assign_other_params(self):
self.assertEqual(
"mock_table | mv-expand bagexpansion=bag with_itemindex=foo expanded_field = arrayField, expanded_field2 = arrayField2 limit 4",
Query(t).mv_expand(
t.arrayField.assign_to(col.expanded_field), expanded_field2=t.arrayField2, bag_expansion=BagExpansion.BAG, with_item_index=col.foo, limit=4
).render(),
)
def test_mv_expand_assign_multiple(self):
self.assertEqual(
"mock_table | mv-expand expanded_field = arrayField, expanded_field2 = arrayField2",
Query(t).mv_expand(expanded_field=t.arrayField, expanded_field2=t.arrayField2).render(),
)
def test_mv_expand_to_type(self):
self.assertEqual(
"mock_table | mv-expand arrayField to typeof(string), arrayField2 to typeof(int), arrayField3",
Query(t).mv_expand(f.to_type(t.arrayField, _KustoType.STRING), f.to_type(t.arrayField2, _KustoType.INT), t.arrayField3).render(),
)
def test_mv_expand_args(self):
self.assertEqual(
"mock_table | mv-expand bagexpansion=bag with_itemindex=foo arrayField, arrayField2, arrayField3 limit 4",
Query(t).mv_expand(t.arrayField, t.arrayField2, t.arrayField3, bag_expansion=BagExpansion.BAG, with_item_index=col.foo, limit=4).render(),
)
def test_mv_expand_no_args(self):
self.assertRaises(
ValueError("Please specify one or more columns for mv-expand"),
Query(t).mv_expand
)
def test_limit(self):
self.assertEqual(
"mock_table | limit 3",
Query(t).limit(3).render(),
)
def test_sample(self):
self.assertEqual(
"mock_table | sample 3",
Query(t).sample(3).render(),
)
def test_count(self):
self.assertEqual(
"mock_table | count",
Query(t).count().render(),
)
def test_project(self):
self.assertEqual(
"mock_table | project stringField, numField",
Query(t).project(t.stringField, t.numField).render(),
)
def test_project_with_expression(self):
self.assertEqual(
"mock_table | project foo = numField * 4",
Query(t).project(foo=t.numField * 4).render(),
)
def test_project_assign_to_multiple_columns(self):
self.assertEqual(
"mock_table | project (foo, bar) = arrayField",
Query(t).project(t.arrayField.assign_to(col.foo, col.bar)).render(),
)
def test_project_unspecified_column(self):
self.assertEqual(
"mock_table | project numField + numField2",
Query(t).project(t.numField + t.numField2).render(),
)
def test_project_away(self):
self.assertEqual(
"mock_table | project-away stringField, numField",
Query(t).project_away(t.stringField, t.numField).render(),
)
def test_project_away_wildcard(self):
self.assertEqual(
"mock_table | project-away stringField, b*",
Query(t).project_away(t.stringField, "b*").render(),
)
def test_project_rename(self):
self.assertEqual(
"mock_table | project-rename a = stringField, c = numField",
Query(t).project_rename(a=t.stringField, c=t.numField).render(),
)
def test_custom(self):
self.assertEqual(
"mock_table | some custom query",
Query(t).custom("some custom query").render(),
)
def test_distinct(self):
self.assertEqual(
"mock_table | distinct stringField, numField",
Query(t).distinct(t.stringField, t.numField).render(),
)
def test_distinct_sample(self):
self.assertEqual(
"mock_table | sample-distinct 5 of stringField",
Query(t).distinct(t.stringField).sample(5).render(),
)
def test_top_hitters(self):
self.assertEqual(
"mock_table | top-hitters 5 of stringField",
Query(t).distinct(t.stringField).top_hitters(5).render(),
)
def test_top_hitters_by(self):
self.assertEqual(
"mock_table | top-hitters 5 of stringField by numField",
Query(t).distinct(t.stringField).top_hitters(5).by(t.numField).render(),
)
def test_distinct_all(self):
self.assertEqual(
"mock_table | distinct *",
Query(t).distinct_all().render(),
)
def test_evaluate(self):
self.assertEqual(
"mock_table | evaluate some_plugin(numField, 3)",
Query(t).evaluate('some_plugin', t.numField, 3).render(),
)
def test_evaluate_with_distribution(self):
self.assertEqual(
"mock_table | evaluate hint.distribution=per_shard some_plugin(numField, 3)",
Query(t).evaluate('some_plugin', t.numField, 3, distribution=Distribution.PER_SHARD).render(),
)
def test_udf(self):
# The static type checker mistakenly thinks func is not of type "FunctionType"
# noinspection PyTypeChecker
self.assertEqual(
f"mock_table | evaluate python(typeof(*, StateZone:string), {STRINGIFIED})",
Query(t).evaluate_udf(func, StateZone=_KustoType.STRING).render(),
)
def test_udf_no_extend(self):
# The static type checker mistakenly thinks func is not of type "FunctionType"
# noinspection PyTypeChecker
self.assertEqual(
f"mock_table | evaluate python(typeof(StateZone:string), {STRINGIFIED})",
Query(t).evaluate_udf(func, extend=False, StateZone=_KustoType.STRING).render(),
)
def test_bag_unpack(self):
self.assertEqual(
"mock_table | evaluate bag_unpack(mapField)",
Query(t).bag_unpack(t.mapField).render(),
)
def test_bag_unpack_with_prefix(self):
self.assertEqual(
'mock_table | evaluate bag_unpack(mapField, "bar_")',
Query(t).bag_unpack(t.mapField, 'bar_').render(),
)
def test_to_dataframe(self):
rows = (['foo', 10], ['bar', 20], ['baz', 30])
columns = ('stringField', 'numField')
client = PyKustoClient(MockKustoClient(
databases_response=mock_databases_response([('test_db', [('mock_table', [('stringField', _KustoType.STRING), ('numField', _KustoType.INT)])])]),
main_response=mock_response(rows, columns),
))
client.wait_for_items()
table = client.test_db.mock_table
actual_df = Query(table).take(10).to_dataframe()
expected_df = | pd.DataFrame(rows, columns=columns) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../../../input/giripujar_hr-analytics/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.formula.api as sm
from statsmodels.stats.outliers_influence import variance_inflation_factor
from patsy import dmatrices
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn.model_selection import train_test_split
print()
print(os.listdir("../../../input/giripujar_hr-analytics"))
# Any results you write to the current directory are saved as output.
# In[ ]:
data = pd.read_csv("../../../input/giripujar_hr-analytics/HR_comma_sep.csv")
# In[ ]:
data.head(5)
# In[ ]:
data.shape
# In[ ]:
data.info()
# In[ ]:
## Let's separate numerical and categorical vaiables into 2 dfs
def sep_data(df):
numerics = ['int32','float32','int64','float64']
num_data = df.select_dtypes(include=numerics)
cat_data = df.select_dtypes(exclude=numerics)
return num_data, cat_data
num_data,cat_data = sep_data(data)
# In[ ]:
## Let's create a summary of Numerical Variables
def print_summary(x):
return pd.Series([x.count(), x.isnull().sum(), x.mean(), x.median(), x.std(), x.var(), x.min(), x.max(), x.dropna().quantile(0.25), x.dropna().quantile(0.75), x.dropna().quantile(0.90) ], index = ["Number of Observations", "Missing Values", "Mean", "Median", "Standard Deviation", "Variance", "Minimum Value", "Maximum Value", "25th Percentile", "75th Percentile", "90th Percentile"])
numerical_summary = num_data.apply(func = print_summary)
# In[ ]:
numerical_summary
# In[ ]:
## Separate X and Y variables
y = data.loc[:,'left']
X = pd.DataFrame(data.drop(columns='left'))
# In[ ]:
X.info()
# <font color = 'indigo' size="12"><b>Below is what we should try:</b></font>
# 1. VIF - Which variables are highly correlated
# 2. Check odds ratio - to see variance in data
# 3. Run a Logistic Regression model to see most impactful variables (use OneHotEncoding for Categorical Variables)
# 4. Run simple Decision Trees to see the explainability of data (EDA)
# 5. Check prediction power of Decision Trees
# 4. Run Random Forest with:
# - Grid Search
# - K Fold cross validations
# - SMOTE
# - Regularization
# - Tree Pruning & Other hyperparameter tuning
# - Confusion Matrics
# - ROC
# - Boosting, GBM and xGBoost
# In[ ]:
plt.rcParams['figure.figsize'] = 16, 7.5
print()
# In[ ]:
## 1. Let's run VIF to check highly correlated and hence redundant variables.
features = num_data.drop(columns='left')
feature_list = "+".join(features.columns)
y, X = dmatrices('left~'+feature_list,num_data,return_type='dataframe')
# In[ ]:
vif = pd.DataFrame()
vif['VIF Factor'] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
vif['Features'] = X.columns
# In[ ]:
vif
# ### <font color = 'green'> The above table shows that there is no variable with a 'high' Variance Inflation Factor ... So, this method suggests we should not drop any variable
# In[ ]:
## 2. Log odds plot - to see variance in data
for feature in num_data.columns.difference(['left']):
binned = pd.cut(num_data[feature],bins=10,labels=list(range(1,11)))
binned = binned.dropna()
ser = num_data.groupby(binned)['left'].sum() / (num_data.groupby(binned)['left'].count() - num_data.groupby(binned)['left'].sum())
ser = np.log(ser)
fig,axes = plt.subplots(figsize=(16,8))
sns.barplot(x=ser.index,y=ser)
plt.ylabel('Log Odds Ratio')
# ### <font color = 'blue'> The above graphs will help us bin the categorical variables better, if need be. </font>
# ### <font color = 'orange'> The following block of code will be used to perform One Hot Encoding of categorical variables - It will also rename the One Hot Encoded Columns</font>
# In[ ]:
## One hot encoding will be done on categorical variables - salary and department ...
## We need to first run label coding before using OneHotEncoding
ohe_columns = data.select_dtypes(include='object').columns
le = LabelEncoder()
data_le = data.copy()
for column in ohe_columns:
data_le.loc[:,column] = le.fit_transform(data_le.loc[:,column])
## One Hot Encoding method takes arrays as X, hence we need to convert features into arrays and remove headings.
X = data_le.drop(columns='left').values ## This approach will create rows of arrays which need to be passed to OneHotEncoder
y = data_le.loc[:,'left'] ## This does not require array - hence we are just copying and not using .values
ohe = OneHotEncoder(categorical_features=[7,8]) ## This method takes index location of categorical variables in X array as input
X = ohe.fit_transform(X).toarray()
## Let's convert X into Data Frame
X = pd.DataFrame(X)
## Maintain columns that are unaffected by OneHotEncoding separately
total_cols = data.columns
cols_maintained = total_cols.drop(['Department','salary','left'])
## Column names for OneHotEncoded Columns - One by one
## 1. For Department
for ind in range(data[ohe_columns[0]].value_counts().count()):
a = X[X[ind] == 1].index.values.astype(int) ## For any column, check where is "1" present as a value after OneHotEncoding
    name_idx = a[0] ## Index of first occurrence of "1"
name = data.loc[a[0],ohe_columns[0]] ## Value in "Department" column in data DataFrame
col_name = ohe_columns[0] + "_" + name ## Concatenate "Department_" + Value as the new column name
X.rename(columns={ind:col_name},inplace=True) ## Rename the column
## 2. For Salary
for ind in range(data[ohe_columns[0]].value_counts().count(),(data[ohe_columns[0]].value_counts().count() + 3)):
a = X[X[ind] == 1].index.values.astype(int) ## For any column, check where is "1" present as a value after OneHotEncoding
    name_idx = a[0] ## Index of first occurrence of "1"
name = data.loc[a[0],ohe_columns[1]] ## Value in "Salary" column in data DataFrame
col_name = ohe_columns[1] + "_" + name ## Concatenate "Salary_" + Value as the new column name
X.rename(columns={ind:col_name},inplace=True) ## Rename the column
## 3. For columns unchanged by OneHotEncoding
counter = 0
for ind in range((data[ohe_columns[0]].value_counts().count() + 3),(len(X.columns))):
X.rename(columns={ind:cols_maintained[counter]},inplace=True)
counter = counter + 1
# In[ ]:
## Let's run Logistic Regression now ....
## First, we need to split data into train and test
## Scenario 1 --> where all dummy classes are present ....
train_X, test_X, train_y, test_y = train_test_split(X,y,test_size = 0.3,random_state = 142)
model = sm.Logit(train_y,train_X)
result = model.fit()
# In[ ]:
result.summary()
# In[ ]:
result.pvalues
# In[ ]:
## Drop one of the dummy variables for each OneHotEncoded variable
train_X_2 = train_X.drop(columns=['Department_IT','salary_high'])
# In[ ]:
## Scenario 2 --> Run Logistic on Xs with dropped data - avoiding dummy variable trap
model_2 = sm.Logit(train_y,train_X_2)
result_2 = model_2.fit()
result_2.summary()
# In[ ]:
result_2.pvalues
# ### <font color = 'maroon'> Here, we see that by dropping one dummy variable for every one-hot-encoded variable, suddenly a lot of variables for "Department" and "Salary" become impactful. ... Hence, it is a best practice to keep (N-1) dummy variables for every variable with N unique values </font>
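# In[ ]:
## Added illustration (not from the original notebook): pandas can apply the (N-1)
## dummy-variable rule directly with drop_first=True, instead of dropping the
## reference columns ('Department_IT', 'salary_high') by hand as done above.
X_alt = pd.get_dummies(data.drop(columns='left'), columns=['Department', 'salary'], drop_first=True)
X_alt.head()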
# In[ ]:
test_X_2 = test_X.drop(columns=['Department_IT','salary_high'])
## Create a data frame with 2 columns - one has the predicted probability and the other has the actual class
predict_2 = pd.DataFrame(result_2.predict(test_X_2))
predict_2.rename(columns={0:'pred_prob'},inplace=True)
predict_test = pd.concat([predict_2,test_y],axis=1)
import glob
import itertools
import os
from datetime import datetime
from itertools import chain
import chart_studio.plotly as py
import matplotlib.pyplot as plt
import missingno as msno
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer
from sklearn.linear_model import BayesianRidge
from xgboost import XGBRegressor
WEEKS_YEAR = {2019: 52, 2020: 53, 2021: 12}
#####################
# GENERAL FUNCTIONS #
#####################
def re_assert_cod_datatypes(df):
dtypes = {
"COD_DPTO": "object",
"COD_MUNICIPIO": "object",
"SEMANA": "object",
"ANO": "object",
"DENGUE": "UInt16",
"DENGUE GRAVE": "UInt16",
"MORTALIDAD POR DENGUE": "UInt8",
"CODIGOESTACION": "object",
}
for col in df.columns:
if col in dtypes.keys():
df[col] = df[col].astype(dtypes[col])
if col == "SEMANA" or col == "COD_DPTO":
df[col] = df[col].apply(lambda x: str(int(x)).zfill(2))
return df
def get_sivigila_calendar():
date = pd.to_datetime("2006-01-01")
week = 1
weeks = []
year = 2006
sivigila_53_years = [2008, 2014, 2020]
day = 1
while year < 2022:
if week == 53:
if not (year in sivigila_53_years):
year += 1
week = 1
elif week > 53:
year += 1
week = 1
# if day == 1:
# year = date.year
weeks.append((date, year, week))
day += 1
if day == 8:
week += 1
day = 1
date += pd.DateOffset(days=1)
columns = ["FECHA", "ANO", "SEMANA"]
df_weeks = pd.DataFrame(weeks, columns=columns)
df_weeks = df_weeks.set_index("FECHA")
df_weeks = re_assert_cod_datatypes(df_weeks)
return df_weeks
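# Illustrative sketch (not part of the original module): the calendar above maps each
# calendar date to its SIVIGILA epidemiological year and week, so a daily series can
# be aggregated to epidemiological weeks with a simple index join. `df_daily` is a
# hypothetical dataframe indexed by date with numeric columns.
def _example_daily_to_sivigila_weeks(df_daily):
    df_weeks = get_sivigila_calendar()
    joined = df_daily.join(df_weeks, how="inner")
    return joined.groupby(["ANO", "SEMANA"]).mean()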
####################
# DENGUE FUNCTIONS #
####################
def unify_column_names_dengue(columns):
new_columns = []
for col in columns:
if "evento" == col.lower():
new_columns.append("NOMBRE")
elif "cod_e" in col.lower():
new_columns.append("COD_EVE")
elif "cod_d" in col.lower():
new_columns.append("COD_DPTO")
elif "cod_m" in col.lower():
new_columns.append("COD_MUNICIPIO")
elif "dato" in col.lower() or "conteo" in col.lower():
new_columns.append("TOTAL_CASOS")
else:
new_columns.append(col.upper())
return new_columns
def get_dengue_weeks_from_rutinaria(filename, codes, sheet_num, year):
df = pd.read_excel(filename, sheet_name=sheet_num)
df.columns = unify_column_names_dengue(list(df.columns))
df["ANO"] = year
df = df[~df["COD_DPTO"].isin([0, 1])]
df = df[df.COD_EVE.isin(codes)]
df_pivoted = df.pivot_table(
values="TOTAL_CASOS",
columns="NOMBRE",
index=["ANO", "COD_DPTO", "COD_MUNICIPIO", "SEMANA"],
).reset_index()
return df_pivoted
def read_all_rutinarias(codes=[210, 220, 580]):
dfs = []
for year in range(2007, 2019):
print("Doing year ", year)
filename = "local/data/src_general/rutinarias_dengue/rutinaria_{}.xlsx".format(
year
)
df = get_dengue_weeks_from_rutinaria(filename, codes, sheet_num=3, year=year)
df = re_assert_cod_datatypes(df)
dfs.append(df)
df = pd.concat(dfs)
df.columns.name = None
return re_assert_cod_datatypes(df)
def get_time_series_from_municipio_subset(subset_mun, event_col_n, cod_eve, year):
try:
series_index = get_index_from_col_and_string(
subset_mun, subset_mun.columns[event_col_n], cod_eve
)
series = list(subset_mun.loc[series_index, :][6:])
except Exception as e:
series = [0] * WEEKS_YEAR[year]
return series
def get_index_from_col_and_string(df, col, string):
return df[df.loc[:, col] == string].index[0]
def dataframe_from_dengue_series(series, value_name, year):
cols = ["ANO", "COD_DPTO", "COD_MUNICIPIO"] + [
str(i) for i in range(1, WEEKS_YEAR[year] + 1)
]
df = pd.DataFrame(series).transpose()
df.columns = cols
df = pd.melt(
df,
id_vars=["ANO", "COD_DPTO", "COD_MUNICIPIO"],
var_name="SEMANA",
value_name=value_name,
)
return df
####################
# CITIES FUNCTIONS #
####################
def get_composite_city_code(x):
composite_code = str(x["COD_DPTO"]).zfill(2) + str(x["COD_MUNICIPIO"]).zfill(3)
x["COD_MUNICIPIO"] = composite_code
return x
def combine_cities_and_weeks(df_weeks, df_cities):
dfs_week_city = []
for city in df_cities.iterrows():
df_week_city = df_weeks.copy()
df_week_city["COD_MUNICIPIO"] = city[1][1]
df_week_city["COD_DPTO"] = city[1][0]
dfs_week_city.append(df_week_city)
res = pd.concat(dfs_week_city, axis=0)
return res
def combine_cities_weeks_and_dengue(df_weeks_cities, df_dengue):
den_columns = [
"ANO",
"SEMANA",
"COD_DPTO",
"COD_MUNICIPIO",
"DENGUE",
"DENGUE GRAVE",
"MORTALIDAD POR DENGUE",
]
res = pd.merge(
left=df_weeks_cities.reset_index(),
right=df_dengue[den_columns],
how="left",
on=["ANO", "SEMANA", "COD_MUNICIPIO"],
suffixes=("", "_y"),
)
res = res.drop(res.filter(regex="_y$").columns.tolist(), axis=1)
res = res.set_index("FECHA")
return res
def read_csv_IDEAM(filenames, sep=r";|,"):
    """
    This function receives a list of .csv files downloaded from
    http://dhime.ideam.gov.co/atencionciudadano/
    and combines them into a single pandas dataframe; selecting the relevant
    station and variable is done afterwards with get_variable_IDEAM.
    -filenames: list of one or more filenames of .csv files.
    -sep: the separator passed to pd.read_csv. Not all files downloaded from
    IDEAM use the same separator, so the default regex accepts either ; or ,.
    returns: a single dataframe with the concatenated contents of the files.
    """
dfs = []
for filename in filenames:
dfs.append(
pd.read_csv(
filename,
sep,
dtype={"Calificador": "object", "Latitud": "object"},
engine="python",
)
)
    if len(dfs) > 1:
df = pd.concat(dfs, axis=0).reset_index(drop=True)
else:
df = dfs[0]
return df
def get_variable_IDEAM(df, possible_var_names, standard_var_name, codigo_estacion=None):
df = df[df["Etiqueta"].isin(possible_var_names)].reset_index(drop=True)
# value_name = df.loc[0, "Etiqueta"]
if codigo_estacion:
df = df[(df["CodigoEstacion"] == codigo_estacion)]
df = df[["CodigoEstacion", "Fecha", "Valor"]]
df.columns = ["CodigoEstacion", "DATE", standard_var_name]
df.index = pd.to_datetime(df["DATE"], dayfirst=False)
df = df.drop_duplicates()
del df["DATE"]
return df
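# Illustrative sketch (not part of the original module): how the two helpers above are
# assumed to be chained by a caller. The variable label, standard name and station
# code shown here are hypothetical and depend on the IDEAM export being processed.
def _example_load_station_variable(filenames, station_code):
    raw = read_csv_IDEAM(filenames)
    return get_variable_IDEAM(
        raw,
        possible_var_names=["Temperatura"],  # hypothetical label in the export
        standard_var_name="TEMPERATURA",
        codigo_estacion=station_code,
    )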
def filter_entries_by_column_index_values(df, column_name, min_val=None, max_val=None):
if min_val and max_val:
df = df[(df[column_name] >= min_val) & (df[column_name] <= max_val)]
return df
elif not min_val:
df = df[(df[column_name] <= max_val)]
return df
elif not max_val:
df = df[(df[column_name] >= min_val)]
return df
else:
return df
def combine_IDEAM_stations(df_all, stations_priority):
df_st = df_all[df_all["CodigoEstacion"] == stations_priority[0]]
if len(df_all) == 1:
return df_st
for station in stations_priority[1:]:
df_st_add = df_all[df_all["CodigoEstacion"] == station]
df_st_add = df_st_add[(~df_st_add.index.isin(df_st.index))]
        df_st = pd.concat([df_st, df_st_add])
    return df_st
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
            290: pd.Timestamp("2013-02-16 00:00:00"),
'''
example of loading FinMind api
'''
from Data import Load
import requests
import pandas as pd
url = 'http://finmindapi.servebeer.com/api/data'
list_url = 'http://finmindapi.servebeer.com/api/datalist'
translate_url = 'http://finmindapi.servebeer.com/api/translation'
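# Helper sketch (added illustration, not part of the original example): every block
# below repeats the same pattern of building form_data, posting it to `url` and
# wrapping the 'data' field of the response in a DataFrame, so it can be factored
# out once, e.g. df_info = fetch_dataset('TaiwanStockInfo').
def fetch_dataset(dataset, **params):
    form_data = {'dataset': dataset, **params}
    res = requests.post(url, verify=True, data=form_data)
    return pd.DataFrame(res.json()['data'])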
'''----------------TaiwanStockInfo----------------'''
form_data = {'dataset': 'TaiwanStockInfo'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------Taiwan Stock Dividend Result----------------'''
form_data = {'dataset': 'StockDividendResult'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------StockDividendResult (single stock, from a date)----------------'''
form_data = {'dataset': 'StockDividendResult',
'stock_id': '2330',
'date': '2010-10-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockNews----------------'''
form_data = {'dataset': 'TaiwanStockNews',
'date': '2019-10-10',
'stock_id': '2317'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPrice----------------'''
form_data = {'dataset': 'TaiwanStockPrice',
'stock_id': '2317',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockPriceMinute----------------'''
form_data = {'dataset': 'TaiwanStockPriceMinute',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------FinancialStatements----------------'''
form_data = {'dataset': 'FinancialStatements',
'stock_id': '2317',
'date': '2019-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data = Load.transpose(data)
data.head()
'''----------------TaiwanCashFlowsStatement----------------'''
form_data = {'dataset': 'TaiwanCashFlowsStatement',
'stock_id': '2330',
'date': '2019-06-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------TaiwanStockStockDividend----------------'''
form_data = {'dataset': 'TaiwanStockStockDividend',
'stock_id': '2317',
'date': '2018-01-01'}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
data.head()
'''----------------StockDividend----------------'''
form_data = {'dataset': 'StockDividend',
'stock_id': '0050',
'date': '2015-01-02',
}
res = requests.post(
url, verify=True,
data=form_data)
temp = res.json()
data = pd.DataFrame(temp['data'])
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 15 11:01:20 2020
@author: Ray
@email: <EMAIL>
@wechat: RayTing0305
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import string
import scipy.stats as stats
'''
relax a little bit
test a quiz
'''
sdata = {'Ohio': 35000, 'Texas': 71000, 'Oregon': 16000, 'Utah': 5000}
obj1 = pd.Series(sdata)
states = ['California', 'Ohio', 'Oregon', 'Texas']
obj2 = pd.Series(sdata, index = states)
obj3 = pd.isnull(obj2)
x=obj2['California']
#print(obj2['California']!=x)
#print(obj2['California']==None)
#print(math.isnan(obj2['California']))
#print(pd.isna(obj2['California']))
d = {
'1': 'Alice',
'2': 'Bob',
'3': 'Rita',
'4': 'Molly',
'5': 'Ryan'
}
S = pd.Series(d)
data = pd.DataFrame(np.arange(16).reshape((4, 4)),
index=['Ohio', 'Colorado', 'Utah', 'New York'],
columns=['one', 'two', 'three', 'four'])
data1 = data.rename(mapper=lambda x:x.upper(), axis=1)
#data2 = data.rename(mapper=lambda x:x.upper(), axis=1, inplace=True)
#data3 = data.rename(mapper=lambda x:x.upper(), axis='column')
data = {'gre score':[337, 324, 316, 322, 314],
'toefl score':[118, 107, 104, 110, 103]}
df = pd.DataFrame(data, columns=['gre score', 'toefl score'],
index=np.arange(1, 6, 1))
df1 = df.where(df['toefl score']>105).dropna()
df2 = df[df['toefl score']>105]
df3 = df.where(df['toefl score']>105)
#s1 = pd.Series({1: 'Alice', 2: 'Jack', 3: 'Molly'})
#s2 = pd.Series({'Alice': 1, 'Jack': 2, 'Molly': 3})
df1 = df[df['toefl score'].gt(105)&df['toefl score'].lt(115)]
df2 = df[(df['toefl score'].isin(range(106,115)))]
df3 = (df['toefl score']>105)&(df['toefl score']<115)
data = {'Name':['Alice', 'Jack'],
'Age':[20, 22],
'Gender':['F', 'M']}
index1 = ['Mathematics', 'Sociology']
df = pd.DataFrame(data, index=index1)
"""bcbio run module"""
import os
import re
import yaml
import glob
from itertools import chain
import pandas as pd
import datetime
from scilifelab.utils.misc import filtered_walk, query_yes_no, prune_option_list
from scilifelab.utils.dry import dry_write, dry_backup, dry_unlink, dry_rmdir, dry_makedir
from scilifelab.log import minimal_logger
from scilifelab.bcbio import sort_sample_config_fastq, update_sample_config, update_pp_platform_args, merge_sample_config
from scilifelab.bcbio.flowcell import Flowcell
LOG = minimal_logger(__name__)
# The analysis script for running the pipeline in parallell mode (on one node)
PARALLELL_ANALYSIS_SCRIPT="automated_initial_analysis.py"
# The analysis script for running the pipeline in distributed mode (across multiple nodes/cores)
DISTRIBUTED_ANALYSIS_SCRIPT="distributed_nextgen_pipeline.py"
# If True, will sanitize the run_info.yaml configuration file when running non-CASAVA analysis
PROCESS_YAML = True
# If True, will assign the distributed master process and workers to a separate RabbitMQ queue for each flowcell
FC_SPECIFIC_AMPQ = True
## Name of merged sample output directory
MERGED_SAMPLE_OUTPUT_DIR = "TOTAL"
def _sample_status(x):
"""Find the status of a sample.
Look for output files: currently only look for project-summary.csv"""
if os.path.exists(os.path.join(os.path.dirname(x), "project-summary.csv")):
return "PASS"
else:
return "FAIL"
def _group_samples(flist, include_merged=False):
"""Group samples by sample name and flowcell
This function assumes flist consists of bcbb-config.yaml files. It reads each file
and extracts sample name and flowcell for subsequent grouping. Exclude MERGED_SAMPLE_OUTPUT_DIR from grouping.
:param flist: list of bcbb-config.yaml files
:returns: dictionary of samples grouped by name and flowcell
"""
sample_d = {}
for f in flist:
if not include_merged and os.path.dirname(f).endswith(MERGED_SAMPLE_OUTPUT_DIR):
continue
with open(f) as fh:
            conf = yaml.load(fh, Loader=yaml.SafeLoader)
if conf.get("details", [])[0].get("multiplex", []):
sample_id = conf.get("details", [])[0].get("multiplex", [])[0].get("name", None)
else:
sample_id = conf.get("details", [])[0].get("description")
if not sample_id:
LOG.warn("No sample_id found in file {}; skipping".format(f))
continue
if conf.get("details", [])[0].get("flowcell_id", None):
fc_id = conf.get("details", [])[0].get("flowcell_id")
else:
fc_id = conf.get("fc_name", None)
if not fc_id:
LOG.warn("No flowcell_id found in file {}; skipping".format(f))
continue
if not sample_id in sample_d.keys():
sample_d[sample_id] = {fc_id:f}
else:
sample_d[sample_id][fc_id] = f
return sample_d
## FIXME: make pandas data frame of all samples, with info about lane,
## flowcell, date, barcode_id, path, sample name -> makes searching for files much easier
def sample_table(flist):
"""Make a table from bcbb-config yaml files.
:param flist: file list of config files
:returns: data frame
"""
samples = []
for f in flist:
path = os.path.dirname(f)
with open(f) as fh:
            conf = yaml.load(fh, Loader=yaml.SafeLoader)
runinfo = conf.get("details") if conf.get("details", None) else conf
for info in runinfo:
lane = info.get("lane", None)
fc_name = info.get("flowcell_id", None)
fc_date = info.get("fc_date", None)
if info.get("multiplex", None):
for mp in info.get("multiplex"):
barcode_id = mp.get("barcode_id", None)
sample = mp.get("name", None)
samples.append([sample, lane, barcode_id, fc_name, fc_date, path])
else:
barcode_id = None
sample = info.get("description", None)
fc_name = conf.get("fc_name", None)
fc_date = conf.get("fc_date", None)
samples.append([sample, lane, barcode_id, fc_name, fc_date, path])
    return pd.DataFrame(samples, columns=["sample", "lane", "barcode_id", "fc_name", "fc_date", "path"])
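def _example_sample_table():
    """Illustrative sketch, not part of the original module.
    Builds a sample table from all bcbb-config.yaml files found under a
    hypothetical analysis directory; the path pattern is an assumption."""
    config_files = glob.glob(os.path.join("/path/to/analysis", "*", "*-bcbb-config.yaml"))
    return sample_table(config_files)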
import pandas as pd
from surprise import KNNWithMeans, SVD, SVDpp, NMF
from surprise.prediction_algorithms.slope_one import SlopeOne
from settings.config import user_label, NMF_LABEL, \
SVDpp_LABEL, SVD_LABEL, SLOPE_LABEL, ITEMKNN_LABEL, USERKNN_LABEL, item_label, value_label, K_NEIGHBOR
from conversions.pandas_to_models import transform_testset, user_transactions_df_to_item_mapping
from conversions.suprise_and_pandas import surprise_to_pandas_get_candidates_items
from settings.language_strings import LANGUAGE_USER_KNN_START, LANGUAGE_ITEM_KNN_START, \
LANGUAGE_SVD_START, LANGUAGE_SVD_STOP, LANGUAGE_SVDPP_START, LANGUAGE_SVDPP_STOP, \
LANGUAGE_NMF_START, LANGUAGE_NMF_STOP, LANGUAGE_SLOPE_ONE_START, LANGUAGE_SLOPE_ONE_STOP
from posprocessing.step import postprocessing_calibration
from processing.recommendation_average import users_results_mean
def recommendation_and_posprocessing(user_id, user_trainset_df, user_prefs_distr_df, user_testset_df, item_mapping,
instance, baseline_label):
keys_list = item_mapping.keys()
know_items = user_trainset_df[item_label].unique().tolist()
unknow_items = set(keys_list) - set(know_items)
data = {item_label: list(unknow_items)}
    user_testset = pd.DataFrame.from_dict(data)
# // Licensed under the Apache License, Version 2.0 (the "License");
# // you may not use this file except in compliance with the License.
# // You may obtain a copy of the License at
# //
# // http://www.apache.org/licenses/LICENSE-2.0
# //
# // Unless required by applicable law or agreed to in writing, software
# // distributed under the License is distributed on an "AS IS" BASIS,
# // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# // See the License for the specific language governing permissions and
# // limitations under the License.
import pandas as pd
import numpy as np
from twister2deepnet.deepnet.exception.internal import ParameterError
class UtilPanda:
@staticmethod
def convert_numpy_to_pandas(ndarray=None):
dataframe = None
if not isinstance(ndarray, np.ndarray):
            raise ParameterError("Input is {}, but expected {}".format(type(ndarray), np.ndarray))
else:
dataframe = pd.DataFrame(ndarray)
return dataframe
@staticmethod
    def convert_partition_to_pandas(partition=None):
        """
        Convert a Partition data type into a Pandas DataFrame.
        The content of each segment of the partition is flattened so that it fits
        the 2-D nature of a Pandas DataFrame.
        Warning: at the point of use, make sure to reshape back to the original shape.
        :param partition: Partition object of data from the PyTorch data source
        :return: Pandas dataframe
        """
dataframe = None
lst = []
if not len(partition) > 0:
pass
for item in partition:
## Check 2D data
if len(item.shape) > 1:
lst.append(item.flatten())
else:
lst.append(item)
lst_array = np.array(lst)
print(lst_array.shape)
        dataframe = pd.DataFrame(lst_array)
        return dataframe
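# Illustrative sketch (not part of the original class): flatten a partition of 2-D
# arrays with convert_partition_to_pandas, then recover the original shape as the
# warning in its docstring suggests. The sample partition here is hypothetical.
if __name__ == "__main__":
    partition = [np.arange(6).reshape(2, 3) for _ in range(4)]
    frame = UtilPanda.convert_partition_to_pandas(partition)
    restored = frame.to_numpy().reshape(len(partition), 2, 3)
    print(restored.shape)  # expected: (4, 2, 3)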
#!/usr/bin/env python3
"""Machine Learning module for ADNI capstone project.
This module contains functions for use with the ADNI dataset.
"""
if 'pd' not in globals():
import pandas as pd
if 'np' not in globals():
import numpy as np
if 'plt' not in globals():
import matplotlib.pyplot as plt
if 'sns' not in globals():
import seaborn as sns
if 'scipy.stats' not in globals():
import scipy.stats
if 'StandardScaler' not in globals():
from sklearn.preprocessing import StandardScaler, MinMaxScaler
if 'KNeighborsClassifier' not in globals():
from sklearn.neighbors import KNeighborsClassifier
if 'SVC' not in globals():
from sklearn.svm import SVC
if 'train_test_split' not in globals():
from sklearn.model_selection import train_test_split, GridSearchCV
if 'MultinomialNB' not in globals():
from sklearn.naive_bayes import MultinomialNB
if 'confusion_matrix' not in globals():
from sklearn.metrics import roc_auc_score, confusion_matrix, classification_report
if 'RandomForestClassifier' not in globals():
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
if 'linear_model' not in globals():
from sklearn import linear_model
if 'PCA' not in globals():
from sklearn.decomposition import PCA
sns.set()
def get_delta_scaled(final_exam, neg_one=False):
    """Take the final_exam dataframe and return datasets.
    This function returns three numpy arrays: feature_names, Xd, and yd.
    The Xd array holds the standardized feature data and the yd array holds the
    diagnosis group labels; feature_names holds the list of feature names.
    The neg_one parameter allows you to specify -1 for the negative class
    (for SVM)."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=nc_idx),
pd.DataFrame({'dx_group': -1}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
deltas_df = final_exam.loc[labels.index]
deltas_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
deltas_df = pd.get_dummies(deltas_df, drop_first=True, columns=['PTGENDER'])
# extract the features for change in diagnosis
X_delta = deltas_df.reindex(columns=['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta',
'RAVLT_delta', 'Hippocampus_delta', 'Ventricles_delta',
'WholeBrain_delta', 'Entorhinal_delta', 'MidTemp_delta',
'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_delta', 'ADAS11_delta', 'ADAS13_delta', 'MMSE_delta', 'RAVLT_delta',
'Hippocampus_delta', 'Ventricles_delta', 'WholeBrain_delta',
'Entorhinal_delta', 'MidTemp_delta', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_delta)
# extract the labels
yd = np.array(deltas_df.dx_group)
# return the data
return feature_names, Xd, yd
def plot_best_k(X_train, X_test, y_train, y_test, kmax=9):
"""This function will create a plot to help choose the best k for k-NN.
Supply the training and test data to compare accuracy at different k values.
Specifying a max k value is optional."""
# Setup arrays to store train and test accuracies
# view the plot to help pick the best k to use
neighbors = np.arange(1, kmax)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
# Loop over different values of k
for i, k in enumerate(neighbors):
# Setup a k-NN Classifier with k neighbors: knn
knn = KNeighborsClassifier(n_neighbors=k)
# Fit the classifier to the training data
knn.fit(X_train, y_train)
#Compute accuracy on the training set
train_accuracy[i] = knn.score(X_train, y_train)
#Compute accuracy on the testing set
test_accuracy[i] = knn.score(X_test, y_test)
if kmax < 11:
s = 2
elif kmax < 21:
s = 4
elif kmax < 41:
s = 5
elif kmax < 101:
s = 10
else:
s = 20
# Generate plot
_ = plt.title('k-NN: Varying Number of Neighbors')
_ = plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
_ = plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
_ = plt.legend()
_ = plt.xlabel('Number of Neighbors')
_ = plt.ylabel('Accuracy')
_ = plt.xticks(np.arange(0,kmax,s))
plt.show()
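def _example_knn_workflow(final_exam):
    """Illustrative sketch, not part of the original module.
    Shows how get_delta_scaled and plot_best_k are assumed to fit together;
    final_exam is the prepared ADNI dataframe used throughout this module and
    the split parameters are arbitrary choices for the example."""
    feature_names, Xd, yd = get_delta_scaled(final_exam)
    X_train, X_test, y_train, y_test = train_test_split(
        Xd, yd, test_size=0.3, random_state=21, stratify=yd)
    plot_best_k(X_train, X_test, y_train, y_test, kmax=15)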
def plot_f1_scores(k, s, r, b, l, n):
"""This function accepts six dictionaries containing classification reports.
This function is designed to work specifically with the six dictionaries created
in the 5-Machine_Learning notebook, as the second dictionary is SVM, which
uses classes of -1 and 1, whereas the other classes are 0 and 1."""
# extract the data and store in a dataframe
df = pd.DataFrame({'score': [k['0']['f1-score'], k['1']['f1-score'], s['-1']['f1-score'], s['1']['f1-score'],
r['0']['f1-score'], r['1']['f1-score'], b['0']['f1-score'], b['1']['f1-score'],
l['0']['f1-score'], l['1']['f1-score'], n['0']['f1-score'], n['1']['f1-score']],
'model': ['KNN', 'KNN', 'SVM', 'SVM', 'Random Forest', 'Random Forest',
'AdaBoost', 'AdaBoost', 'Log Reg', 'Log Reg', 'Naive Bayes', 'Naive Bayes'],
'group': ['Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD', 'Non AD', 'AD',
'Non AD', 'AD', 'Non AD', 'AD']})
# create the plot
ax = sns.barplot('model', 'score', hue='group', data=df)
_ = plt.setp(ax.get_xticklabels(), rotation=25)
_ = plt.title('F1 Scores for Each Model')
_ = plt.ylabel('F1 Score')
_ = plt.xlabel('Model')
_ = ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
def get_bl_data(final_exam, neg_one=False):
"""This function extracts the baseline data features for machine learning.
    Pass the final_exam dataframe, and specify optional neg_one=True for SVM
    (sets the non-AD class to -1 instead of 0). Returns feature_names,
    features (X), and labels (y).
"""
# map the diagnosis group and assign to dx_group
non_ad_idx = final_exam[final_exam.DX != 'AD'].index
ad_idx = final_exam[final_exam.DX == 'AD'].index
if neg_one:
labels = pd.concat([pd.DataFrame({'dx_group': -1}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
else:
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=non_ad_idx),
pd.DataFrame({'dx_group': 1}, index=ad_idx)
]).sort_index()
# add to the dataframe and ensure every row has a label
bl_df = final_exam.loc[labels.index]
bl_df.loc[:,'dx_group'] = labels.dx_group
# convert gender to numeric column
bl_df = pd.get_dummies(bl_df, drop_first=True, columns=['PTGENDER'])
# extract the baseline features
X_bl = bl_df.reindex(columns=['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# store the feature names
feature_names = np.array(['CDRSB_bl', 'ADAS11_bl', 'ADAS13_bl', 'MMSE_bl', 'RAVLT_immediate_bl',
'Hippocampus_bl', 'Ventricles_bl', 'WholeBrain_bl', 'Entorhinal_bl',
'MidTemp_bl', 'PTGENDER_Male', 'AGE'])
# standardize the data
scaler = StandardScaler()
Xd = scaler.fit_transform(X_bl)
# extract the labels
yd = np.array(bl_df.dx_group)
# return the data
return feature_names, Xd, yd
def run_clinical_models(final_exam, biomarkers):
    """This function runs six machine learning models on only the clinical biomarkers.
    A dataframe containing summary information will be returned."""
# map the diagnosis group and assign to dx_group
nc_idx = final_exam[final_exam.DX == final_exam.DX_bl2].index
cn_mci_idx = final_exam[(final_exam.DX == 'MCI') & (final_exam.DX_bl2 == 'CN')].index
mci_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'MCI')].index
cn_ad_idx = final_exam[(final_exam.DX == 'AD') & (final_exam.DX_bl2 == 'CN')].index
labels = pd.concat([pd.DataFrame({'dx_group': 0}, index=nc_idx),
pd.DataFrame({'dx_group': 0}, index=cn_mci_idx),
pd.DataFrame({'dx_group': 1}, index=mci_ad_idx),
                        pd.DataFrame({'dx_group': 1}, index=cn_ad_idx)
                        ]).sort_index()
"""Network representation and utilities
"""
import os
import geopandas
import numpy as np
import pandas
import shapely.errors
from geopandas import GeoDataFrame
from shapely.geometry import (
Point,
MultiPoint,
LineString,
GeometryCollection,
shape,
mapping,
)
from shapely.ops import split, linemerge
# workaround for geopandas >0.9 until snkit #37 and geopandas #1977 are fixed
geopandas._compat.USE_PYGEOS = False
# optional progress bars
if "SNKIT_PROGRESS" in os.environ and os.environ["SNKIT_PROGRESS"] in ("1",
"TRUE"):
try:
from tqdm import tqdm
except ImportError:
from snkit.utils import tqdm_standin as tqdm
else:
from snkit.utils import tqdm_standin as tqdm
class Network:
"""A Network is composed of nodes (points in space) and edges (lines)
Parameters
----------
nodes : geopandas.geodataframe.GeoDataFrame, optional
edges : geopandas.geodataframe.GeoDataFrame, optional
Attributes
----------
nodes : geopandas.geodataframe.GeoDataFrame
edges : geopandas.geodataframe.GeoDataFrame
"""
def __init__(self, nodes=None, edges=None):
""" """
if nodes is None:
nodes = GeoDataFrame()
self.nodes = nodes
if edges is None:
edges = GeoDataFrame()
self.edges = edges
def set_crs(self, crs=None, epsg=None):
"""Set network (node and edge) crs
Parameters
----------
crs : dict or str
Projection parameters as PROJ4 string or in dictionary form.
epsg : int
EPSG code specifying output projection
"""
if crs is None and epsg is None:
raise ValueError(
"Either crs or epsg must be provided to Network.set_crs")
if epsg is not None:
crs = {"init": "epsg:{}".format(epsg)}
self.edges.crs = crs
self.nodes.crs = crs
def to_crs(self, crs=None, epsg=None):
"""Set network (node and edge) crs
Parameters
----------
crs : dict or str
Projection parameters as PROJ4 string or in dictionary form.
epsg : int
EPSG code specifying output projection
"""
if crs is None and epsg is None:
raise ValueError(
"Either crs or epsg must be provided to Network.set_crs")
if epsg is not None:
crs = {"init": "epsg:{}".format(epsg)}
self.edges.to_crs(crs, inplace=True)
self.nodes.to_crs(crs, inplace=True)
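# Hedged usage sketch (an assumption, not part of the original module): build a
# Network from node and edge layers on disk; the file names are placeholders.
def example_network_from_files(node_file="nodes.gpkg", edge_file="edges.gpkg"):
    """Read node and edge layers with geopandas and wrap them in a Network."""
    nodes = geopandas.read_file(node_file)
    edges = geopandas.read_file(edge_file)
    return Network(nodes=nodes, edges=edges)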
def add_ids(network, id_col="id", edge_prefix="edge", node_prefix="node"):
"""Add or replace an id column with ascending ids"""
nodes = network.nodes.copy()
if not nodes.empty:
nodes = nodes.reset_index(drop=True)
edges = network.edges.copy()
if not edges.empty:
edges = edges.reset_index(drop=True)
nodes[id_col] = ["{}_{}".format(node_prefix, i) for i in range(len(nodes))]
edges[id_col] = ["{}_{}".format(edge_prefix, i) for i in range(len(edges))]
return Network(nodes=nodes, edges=edges)
def add_topology(network, id_col="id"):
"""Add or replace from_id, to_id to edges"""
from_ids = []
to_ids = []
for edge in tqdm(network.edges.itertuples(),
desc="topology",
total=len(network.edges)):
start, end = line_endpoints(edge.geometry)
start_node = nearest_node(start, network.nodes)
from_ids.append(start_node[id_col])
end_node = nearest_node(end, network.nodes)
to_ids.append(end_node[id_col])
edges = network.edges.copy()
edges["from_id"] = from_ids
edges["to_id"] = to_ids
return Network(nodes=network.nodes, edges=edges)
def get_endpoints(network):
"""Get nodes for each edge endpoint"""
endpoints = []
for edge in tqdm(network.edges.itertuples(),
desc="endpoints",
total=len(network.edges)):
if edge.geometry is None:
continue
        if edge.geometry.geom_type == "MultiLineString":
for line in edge.geometry.geoms:
start, end = line_endpoints(line)
endpoints.append(start)
endpoints.append(end)
else:
start, end = line_endpoints(edge.geometry)
endpoints.append(start)
endpoints.append(end)
# create dataframe to match the nodes geometry column name
return matching_gdf_from_geoms(network.nodes, endpoints)
def add_endpoints(network):
"""Add nodes at line endpoints"""
endpoints = get_endpoints(network)
nodes = concat_dedup([network.nodes, endpoints])
return Network(nodes=nodes, edges=network.edges)
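# Hedged pipeline sketch (an assumption, not part of the original API): the
# helpers above are typically chained to turn a bare edge layer into a
# connected topology.
def example_clean_topology(network):
    """Add endpoint nodes, assign ids, then derive from_id/to_id per edge."""
    network = add_endpoints(network)  # ensure a node exists at every line endpoint
    network = add_ids(network)        # 'node_0', 'edge_0', ... identifiers
    network = add_topology(network)   # from_id / to_id columns on edges
    return network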
def round_geometries(network, precision=3):
"""Round coordinates of all node points and vertices of edge linestrings to some precision"""
def _set_precision(geom):
return set_precision(geom, precision)
network.nodes.geometry = network.nodes.geometry.apply(_set_precision)
network.edges.geometry = network.edges.geometry.apply(_set_precision)
return network
def split_multilinestrings(network):
"""Create multiple edges from any MultiLineString edge
Ensures that edge geometries are all LineStrings, duplicates attributes over any
created multi-edges.
"""
simple_edge_attrs = []
simple_edge_geoms = []
edges = network.edges
# # Commented out idea about dealing with multi-part MultiLineStrings separately
# is_multi_single_part = edges.geometry.apply(lambda geom: geom.geom_type == 'MultiLineString' and len(geom) == 1)
# is_multi_multi_part = edges.geometry.apply(lambda geom: geom.geom_type == 'MultiLineString' and len(geom) > 1)
# is_not_multi = edges.geometry.apply(lambda geom: geom.geom_type != 'MultiLineString')
# not_multi_edges = edges[is_not_multi].copy()
# multi_single_part_edges = edges[is_multi_single_part].copy()
# multi_single_part_edges.geometry = multi_single_part_edges.geometry.apply(lambda geom: next(geom))
# edges = edges[is_multi_multi_part].copy()
for edge in tqdm(edges.itertuples(index=False),
desc="split_multi",
total=len(edges)):
if edge.geometry.geom_type == "MultiLineString":
            edge_parts = list(edge.geometry.geoms)
else:
edge_parts = [edge.geometry]
for part in edge_parts:
simple_edge_geoms.append(part)
attrs = GeoDataFrame([edge] * len(edge_parts))
simple_edge_attrs.append(attrs)
simple_edge_geoms = GeoDataFrame(simple_edge_geoms, columns=["geometry"])
edges = (pandas.concat(simple_edge_attrs,
axis=0).reset_index(drop=True).drop("geometry",
axis=1))
edges = pandas.concat([edges, simple_edge_geoms], axis=1)
# edges = pandas.concat([edges, multi_single_part_edges, not_multi_edges], axis=0).reset_index(drop=True)
return Network(nodes=network.nodes, edges=edges)
def merge_multilinestring(geom):
"""Merge a MultiLineString to LineString"""
try:
if geom.geom_type == "MultiLineString":
geom_inb = linemerge(geom)
if geom_inb.is_ring:
return geom
# In case of linestring merge issues, we could add this to the script again
# from centerline.main import Centerline
# if geom_inb.geom_type == 'MultiLineString':
# return linemerge(Centerline(geom.buffer(0.5)))
else:
return geom_inb
else:
return geom
    except Exception:
return GeometryCollection()
def snap_nodes(network, threshold=None):
"""Move nodes (within threshold) to edges"""
def snap_node(node):
snap = nearest_point_on_edges(node.geometry, network.edges)
distance = snap.distance(node.geometry)
if threshold is not None and distance > threshold:
snap = node.geometry
return snap
snapped_geoms = network.nodes.apply(snap_node, axis=1)
geom_col = geometry_column_name(network.nodes)
nodes = pandas.concat(
[
network.nodes.drop(geom_col, axis=1),
GeoDataFrame(snapped_geoms, columns=[geom_col]),
],
axis=1,
)
return Network(nodes=nodes, edges=network.edges)
def split_edges_at_nodes(network, tolerance=1e-9):
"""Split network edges where they intersect node geometries"""
split_edges = []
for edge in tqdm(network.edges.itertuples(index=False),
desc="split",
total=len(network.edges)):
hits = nodes_intersecting(edge.geometry, network.nodes, tolerance)
split_points = MultiPoint([hit.geometry for hit in hits.itertuples()])
# potentially split to multiple edges
edges = split_edge_at_points(edge, split_points, tolerance)
split_edges.append(edges)
# combine dfs
edges = pandas.concat(split_edges, axis=0)
# reset index and drop
edges = edges.reset_index().drop("index", axis=1)
# return new network with split edges
return Network(nodes=network.nodes, edges=edges)
def link_nodes_to_edges_within(network,
distance,
condition=None,
tolerance=1e-9):
"""Link nodes to all edges within some distance"""
new_node_geoms = []
new_edge_geoms = []
for node in tqdm(network.nodes.itertuples(index=False),
desc="link",
total=len(network.nodes)):
# for each node, find edges within
edges = edges_within(node.geometry, network.edges, distance)
for edge in edges.itertuples():
if condition is not None and not condition(node, edge):
continue
# add nodes at points-nearest
point = nearest_point_on_line(node.geometry, edge.geometry)
if point != node.geometry:
new_node_geoms.append(point)
# add edges linking
line = LineString([node.geometry, point])
new_edge_geoms.append(line)
new_nodes = matching_gdf_from_geoms(network.nodes, new_node_geoms)
all_nodes = concat_dedup([network.nodes, new_nodes])
new_edges = matching_gdf_from_geoms(network.edges, new_edge_geoms)
all_edges = concat_dedup([network.edges, new_edges])
# split edges as necessary after new node creation
unsplit = Network(nodes=all_nodes, edges=all_edges)
return split_edges_at_nodes(unsplit, tolerance)
def link_nodes_to_nearest_edge(network, condition=None):
"""Link nodes to all edges within some distance"""
new_node_geoms = []
new_edge_geoms = []
for node in tqdm(network.nodes.itertuples(index=False),
desc="link",
total=len(network.nodes)):
# for each node, find edges within
edge = nearest_edge(node.geometry, network.edges)
if condition is not None and not condition(node, edge):
continue
# add nodes at points-nearest
point = nearest_point_on_line(node.geometry, edge.geometry)
if point != node.geometry:
new_node_geoms.append(point)
# add edges linking
line = LineString([node.geometry, point])
new_edge_geoms.append(line)
new_nodes = matching_gdf_from_geoms(network.nodes, new_node_geoms)
all_nodes = concat_dedup([network.nodes, new_nodes])
new_edges = matching_gdf_from_geoms(network.edges, new_edge_geoms)
all_edges = concat_dedup([network.edges, new_edges])
# split edges as necessary after new node creation
unsplit = Network(nodes=all_nodes, edges=all_edges)
return split_edges_at_nodes(unsplit)
def merge_edges(network, id_col="id", by=None):
"""Merge edges that share a node with a connectivity degree of 2
Parameters
----------
network : snkit.network.Network
id_col : string
by : List[string], optional
list of columns to use when merging an edge path - will not merge if
edges have different values.
"""
if "degree" not in network.nodes.columns:
network.nodes["degree"] = network.nodes[id_col].apply(
lambda x: node_connectivity_degree(x, network))
degree2 = list(network.nodes[id_col].loc[network.nodes.degree == 2])
d2_set = set(degree2)
edge_paths = []
while d2_set:
if len(d2_set) % 1000 == 0:
print(len(d2_set))
popped_node = d2_set.pop()
node_path = set([popped_node])
candidates = set([popped_node])
while candidates:
popped_cand = candidates.pop()
matches = set(
np.unique(network.edges[[
"from_id", "to_id"
]].loc[(network.edges.from_id == popped_cand)
| (network.edges.to_id == popped_cand)].values))
matches.remove(popped_cand)
matches = matches - node_path
for match in matches:
if match in degree2:
candidates.add(match)
node_path.add(match)
d2_set.remove(match)
else:
node_path.add(match)
if len(node_path) > 2:
edge_paths.append(
network.edges.loc[(network.edges.from_id.isin(node_path))
& (network.edges.to_id.isin(node_path))])
concat_edge_paths = []
unique_edge_ids = set()
new_node_ids = set(network.nodes[id_col]) - set(degree2)
for edge_path in tqdm(edge_paths, desc="merge_edge_paths"):
unique_edge_ids.update(list(edge_path[id_col]))
edge_path = edge_path.dissolve(by=by)
edge_path_dicts = []
for edge in edge_path.itertuples(index=False):
if edge.geometry.geom_type == "MultiLineString":
edge_geom = linemerge(edge.geometry)
if edge_geom.geom_type == "MultiLineString":
                    edge_geoms = list(edge_geom.geoms)
else:
edge_geoms = [edge_geom]
else:
edge_geoms = [edge.geometry]
for geom in edge_geoms:
start, end = line_endpoints(geom)
start = nearest_node(start, network.nodes)
end = nearest_node(end, network.nodes)
edge_path_dict = {
"from_id": start[id_col],
"to_id": end[id_col],
"geometry": geom,
}
for i, col in enumerate(edge_path.columns):
if col not in ("from_id", "to_id", "geometry"):
edge_path_dict[col] = edge[i]
edge_path_dicts.append(edge_path_dict)
concat_edge_paths.append(geopandas.GeoDataFrame(edge_path_dicts))
new_node_ids.update(list(edge_path.from_id) + list(edge_path.to_id))
edges_new = network.edges.copy()
edges_new = edges_new.loc[~(edges_new.id.isin(list(unique_edge_ids)))]
edges_new.geometry = edges_new.geometry.apply(merge_multilinestring)
edges = pandas.concat(
[edges_new, pandas.concat(concat_edge_paths).reset_index()],
sort=False)
nodes = network.nodes.set_index(id_col).loc[list(
new_node_ids)].copy().reset_index()
return Network(nodes=nodes, edges=edges)
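# Hedged usage sketch: collapse chains of degree-2 nodes after the topology has
# been built; the 'highway' attribute passed to `by` is an assumption and should
# be replaced with whichever edge columns must not be merged across.
def example_merge_by_class(network):
    """Merge degree-2 edge chains without mixing different 'highway' values."""
    network = add_topology(add_ids(add_endpoints(network)))
    return merge_edges(network, by=["highway"])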
def geometry_column_name(gdf):
"""Get geometry column name, fall back to 'geometry'"""
try:
geom_col = gdf.geometry.name
except AttributeError:
geom_col = "geometry"
return geom_col
def matching_gdf_from_geoms(gdf, geoms):
"""Create a geometry-only GeoDataFrame with column name to match an existing GeoDataFrame"""
geom_col = geometry_column_name(gdf)
return GeoDataFrame(geoms, columns=[geom_col])
def concat_dedup(dfs):
"""Concatenate a list of GeoDataFrames, dropping duplicate geometries
- note: repeatedly drops indexes for deduplication to work
"""
cat = | pandas.concat(dfs, axis=0, sort=False) | pandas.concat |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from copy import deepcopy
from sklearn.utils import shuffle
from tqdm import tqdm
############ Make test networks ############
def make_triangonal_net():
"""
Make a triangonal network.
"""
dict_nodes = {'x': [1,3,2],
'y': [2,2,1],
'a': [1,0,0],
'b': [0,1,0],
'c': [0,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,0]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_trigonal_net():
"""
Make a trigonal network.
"""
dict_nodes = {'x': [1,3,2,0,4,2],
'y': [2,2,1,3,3,0],
'a': [1,0,0,1,0,0],
'b': [0,1,0,0,1,0],
'c': [0,0,1,0,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,0],
[0,3],
[1,4],
[2,5]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_P_net():
"""
Make a P-shaped network.
"""
dict_nodes = {'x': [0,0,0,0,1,1],
'y': [0,1,2,3,3,2],
'a': [1,0,0,0,0,0],
'b': [0,0,0,0,1,0],
'c': [0,1,1,1,0,1]}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = [[0,1],
[1,2],
[2,3],
[3,4],
[4,5],
[5,2]]
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_high_assort_net():
"""
Make a highly assortative network.
"""
dict_nodes = {'x': np.arange(12).astype(int),
'y': np.zeros(12).astype(int),
'a': [1] * 4 + [0] * 8,
'b': [0] * 4 + [1] * 4 + [0] * 4,
'c': [0] * 8 + [1] * 4}
nodes = pd.DataFrame.from_dict(dict_nodes)
edges_block = np.vstack((np.arange(3), np.arange(3) +1)).T
data_edges = np.vstack((edges_block, edges_block + 4, edges_block + 8))
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
def make_high_disassort_net():
"""
    Make a highly disassortative network.
"""
dict_nodes = {'x': [1,2,3,4,4,4,3,2,1,0,0,0],
'y': [0,0,0,1,2,3,4,4,4,3,2,1],
'a': [1,0,0] * 4,
'b': [0,1,0] * 4,
'c': [0,0,1] * 4}
nodes = pd.DataFrame.from_dict(dict_nodes)
data_edges = np.vstack((np.arange(12), np.roll(np.arange(12), -1))).T
edges = pd.DataFrame(data_edges, columns=['source','target'])
return nodes, edges
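# Hedged helper sketch (an assumption, not in the original module): convert one of
# the toy (nodes, edges) pairs above into a networkx Graph for quick inspection.
def example_to_networkx(make_net=make_P_net):
    """Build a networkx Graph from a toy network's nodes and edges dataframes."""
    import networkx as nx
    nodes, edges = make_net()
    G = nx.from_pandas_edgelist(edges, source='source', target='target')
    # attach coordinates and one-hot attribute columns to the nodes
    for col in nodes.columns:
        nx.set_node_attributes(G, nodes[col].to_dict(), name=col)
    return G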
def make_random_graph_2libs(nb_nodes=100, p_connect=0.1, attributes=['a', 'b', 'c'], multi_mod=False):
import networkx as nx
# initialize the network
G = nx.fast_gnp_random_graph(nb_nodes, p_connect, directed=False)
pos = nx.kamada_kawai_layout(G)
nodes = pd.DataFrame.from_dict(pos, orient='index', columns=['x','y'])
edges = pd.DataFrame(list(G.edges), columns=['source', 'target'])
# set attributes
if multi_mod:
nodes_class = np.random.randint(0, 2, size=(nb_nodes, len(attributes))).astype(bool)
nodes = nodes.join(pd.DataFrame(nodes_class, index=nodes.index, columns=attributes))
else:
nodes_class = np.random.choice(attributes, nb_nodes)
nodes = nodes.join(pd.DataFrame(nodes_class, index=nodes.index, columns=['nodes_class']))
nodes = nodes.join( | pd.get_dummies(nodes['nodes_class']) | pandas.get_dummies |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# *****************************************************************************/
# * Authors: <NAME>
# *****************************************************************************/
"""transformCSV.py
This module contains the basic functions for creating the content of a configuration file from CSV.
Args:
    --inFile: Path to the input CSV file containing the time series data values
    --outFile: Path to the output INI configuration file for the time series data values
--debug: Boolean flag to activate verbose printing for debug use
Example:
Default usage:
$ python transformCSV.py
Specific usage:
$ python transformCSV.py
--inFile C:\raad\src\software\time-series.csv
--outFile C:\raad\src\software\time-series.ini
--debug True
"""
import sys
import datetime
import optparse
import traceback
import pandas
import numpy
import os
import pprint
import csv
if sys.version_info.major > 2:
import configparser as cF
else:
import ConfigParser as cF
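# Hedged usage sketch (an assumption, not part of the original CLI): programmatic
# use of the TransformMetaData class defined below; the file names are placeholders.
def example_transform(in_csv='time-series.csv', out_ini='time-series.ini'):
    """Parse a tab-separated CSV and write its columns into an INI section."""
    meta = TransformMetaData(inputFileName=in_csv, debug=False, transform=True,
                             sectionName='TimeSeries', outFile=out_ini)
    return meta.getColumnList()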
class TransformMetaData(object):
debug = False
fileName = None
fileLocation = None
columnsList = None
analysisFrameFormat = None
uniqueLists = None
analysisFrame = None
def __init__(self, inputFileName=None, debug=False, transform=False, sectionName=None, outFolder=None,
outFile='time-series-madness.ini'):
if isinstance(debug, bool):
self.debug = debug
if inputFileName is None:
return
elif os.path.exists(os.path.abspath(inputFileName)):
self.fileName = inputFileName
            self.fileLocation = os.path.abspath(inputFileName)
(analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList) = self.CSVtoFrame(
inputFileName=self.fileName)
self.analysisFrame = analysisFrame
self.columnsList = columnNamesList
self.analysisFrameFormat = analysisFrameFormat
self.uniqueLists = uniqueLists
if transform:
passWrite = self.frameToINI(analysisFrame=analysisFrame, sectionName=sectionName, outFolder=outFolder,
outFile=outFile)
print(f"Pass Status is : {passWrite}")
return
def getColumnList(self):
return self.columnsList
def getAnalysisFrameFormat(self):
return self.analysisFrameFormat
def getuniqueLists(self):
return self.uniqueLists
def getAnalysisFrame(self):
return self.analysisFrame
@staticmethod
def getDateParser(formatString="%Y-%m-%d %H:%M:%S.%f"):
        return (lambda x: datetime.datetime.strptime(x, formatString))  # e.g. 2020-06-09 19:14:00.000
def getHeaderFromFile(self, headerFilePath=None, method=1):
if headerFilePath is None:
return (None, None)
if method == 1:
fieldnames = pandas.read_csv(headerFilePath, index_col=0, nrows=0).columns.tolist()
elif method == 2:
with open(headerFilePath, 'r') as infile:
reader = csv.DictReader(infile)
fieldnames = list(reader.fieldnames)
elif method == 3:
fieldnames = list(pandas.read_csv(headerFilePath, nrows=1).columns)
else:
fieldnames = None
fieldDict = {}
for indexName, valueName in enumerate(fieldnames):
fieldDict[valueName] = pandas.StringDtype()
return (fieldnames, fieldDict)
def CSVtoFrame(self, inputFileName=None):
if inputFileName is None:
return (None, None)
# Load File
print("Processing File: {0}...\n".format(inputFileName))
self.fileLocation = inputFileName
# Create data frame
analysisFrame = pandas.DataFrame()
analysisFrameFormat = self._getDataFormat()
inputDataFrame = pandas.read_csv(filepath_or_buffer=inputFileName,
sep='\t',
names=self._getDataFormat(),
# dtype=self._getDataFormat()
# header=None
# float_precision='round_trip'
# engine='c',
# parse_dates=['date_column'],
# date_parser=True,
# na_values=['NULL']
)
if self.debug: # Preview data.
print(inputDataFrame.head(5))
# analysisFrame.astype(dtype=analysisFrameFormat)
# Cleanup data
analysisFrame = inputDataFrame.copy(deep=True)
analysisFrame.apply(pandas.to_numeric, errors='coerce') # Fill in bad data with Not-a-Number (NaN)
# Create lists of unique strings
uniqueLists = []
columnNamesList = []
for columnName in analysisFrame.columns:
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', analysisFrame[columnName].values)
            if pandas.api.types.is_string_dtype(analysisFrame[columnName]):
columnUniqueList = analysisFrame[columnName].unique().tolist()
else:
columnUniqueList = None
columnNamesList.append(columnName)
uniqueLists.append([columnName, columnUniqueList])
if self.debug: # Preview data.
print(analysisFrame.head(5))
return (analysisFrame, analysisFrameFormat, uniqueLists, columnNamesList)
def frameToINI(self, analysisFrame=None, sectionName='Unknown', outFolder=None, outFile='nil.ini'):
if analysisFrame is None:
return False
try:
if outFolder is None:
outFolder = os.getcwd()
configFilePath = os.path.join(outFolder, outFile)
configINI = cF.ConfigParser()
configINI.add_section(sectionName)
            for (columnName, columnData) in analysisFrame.items():
if self.debug:
print('Column Name : ', columnName)
print('Column Contents : ', columnData.values)
print("Column Contents Length:", len(columnData.values))
print("Column Contents Type", type(columnData.values))
writeList = "["
for colIndex, colValue in enumerate(columnData):
writeList = f"{writeList}'{colValue}'"
if colIndex < len(columnData) - 1:
writeList = f"{writeList}, "
writeList = f"{writeList}]"
configINI.set(sectionName, columnName, writeList)
if not os.path.exists(configFilePath) or os.stat(configFilePath).st_size == 0:
with open(configFilePath, 'w') as configWritingFile:
configINI.write(configWritingFile)
noErrors = True
except ValueError as e:
errorString = ("ERROR in {__file__} @{framePrintNo} with {ErrorFound}".format(__file__=str(__file__),
framePrintNo=str(
sys._getframe().f_lineno),
ErrorFound=e))
print(errorString)
noErrors = False
return noErrors
@staticmethod
def _validNumericalFloat(inValue):
"""
Determines if the value is a valid numerical object.
Args:
inValue: floating-point value
Returns: Value in floating-point or Not-A-Number.
"""
try:
return numpy.float128(inValue)
except ValueError:
return numpy.nan
@staticmethod
def _calculateMean(x):
"""
Calculates the mean in a multiplication method since division produces an infinity or NaN
Args:
x: Input data set. We use a data frame.
Returns: Calculated mean for a vector data frame.
"""
try:
mean = numpy.float128(numpy.average(x, weights=numpy.ones_like(numpy.float128(x)) / numpy.float128(x.size)))
except ValueError:
mean = 0
pass
return mean
def _calculateStd(self, data):
"""
        Calculates the standard deviation in a multiplication method since division produces an infinity or NaN
Args:
data: Input data set. We use a data frame.
Returns: Calculated standard deviation for a vector data frame.
"""
sd = 0
try:
n = numpy.float128(data.size)
if n <= 1:
return numpy.float128(0.0)
# Use multiplication version of mean since numpy bug causes infinity.
mean = self._calculateMean(data)
sd = numpy.float128(mean)
# Calculate standard deviation
for el in data:
diff = numpy.float128(el) - numpy.float128(mean)
sd += (diff) ** 2
points = numpy.float128(n - 1)
sd = numpy.float128(numpy.sqrt(numpy.float128(sd) / numpy.float128(points)))
except ValueError:
pass
return sd
def _determineQuickStats(self, dataAnalysisFrame, columnName=None, multiplierSigma=3.0):
"""
Determines stats based on a vector to get the data shape.
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
multiplierSigma: Sigma range for the stats.
Returns: Set of stats.
"""
meanValue = 0
sigmaValue = 0
sigmaRangeValue = 0
topValue = 0
try:
            # Clean out anomalies due to random invalid inputs.
if (columnName is not None):
meanValue = self._calculateMean(dataAnalysisFrame[columnName])
                if numpy.isnan(meanValue):
                    meanValue = numpy.float128(1)
                sigmaValue = self._calculateStd(dataAnalysisFrame[columnName])
                if numpy.isnan(sigmaValue):
                    sigmaValue = numpy.float128(1)
                multiplier = numpy.float128(multiplierSigma)  # Stats: 1 sigma = 68%, 2 sigma = 95%, 3 sigma = 99.7%
                sigmaRangeValue = (sigmaValue * multiplier)
                if numpy.isnan(sigmaRangeValue):
sigmaRangeValue = numpy.float128(1)
topValue = numpy.float128(meanValue + sigmaRangeValue)
print("Name:{} Mean= {}, Sigma= {}, {}*Sigma= {}".format(columnName,
meanValue,
sigmaValue,
multiplier,
sigmaRangeValue))
except ValueError:
pass
return (meanValue, sigmaValue, sigmaRangeValue, topValue)
def _cleanZerosForColumnInFrame(self, dataAnalysisFrame, columnName='cycles'):
"""
Cleans the data frame with data values that are invalid. I.E. inf, NaN
Args:
dataAnalysisFrame: Dataframe to do analysis on.
columnName: Column name of the data frame.
Returns: Cleaned dataframe.
"""
dataAnalysisCleaned = None
try:
            # Clean out anomalies due to random invalid inputs.
(meanValue, sigmaValue, sigmaRangeValue, topValue) = self._determineQuickStats(
dataAnalysisFrame=dataAnalysisFrame, columnName=columnName)
# dataAnalysisCleaned = dataAnalysisFrame[dataAnalysisFrame[columnName] != 0]
# When the cycles are negative or zero we missed cleaning up a row.
# logicVector = (dataAnalysisFrame[columnName] != 0)
# dataAnalysisCleaned = dataAnalysisFrame[logicVector]
            logicVector = (dataAnalysisFrame[columnName] >= 1)
            dataAnalysisCleaned = dataAnalysisFrame[logicVector]
# These timed out mean + 2 * sd
logicVector = (dataAnalysisCleaned[columnName] < topValue) # Data range
dataAnalysisCleaned = dataAnalysisCleaned[logicVector]
except ValueError:
pass
return dataAnalysisCleaned
def _cleanFrame(self, dataAnalysisTemp, cleanColumn=False, columnName='cycles'):
"""
Args:
dataAnalysisTemp: Dataframe to do analysis on.
cleanColumn: Flag to clean the data frame.
columnName: Column name of the data frame.
Returns: cleaned dataframe
"""
try:
            replacementList = [pandas.NaT, numpy.inf, -numpy.inf, 'NaN', 'inf', '-inf', 'NULL']
if cleanColumn is True:
dataAnalysisTemp = self._cleanZerosForColumnInFrame(dataAnalysisTemp, columnName=columnName)
dataAnalysisTemp = dataAnalysisTemp.replace(to_replace=replacementList,
value=numpy.nan)
dataAnalysisTemp = dataAnalysisTemp.dropna()
except ValueError:
pass
return dataAnalysisTemp
@staticmethod
def _getDataFormat():
"""
Return the dataframe setup for the CSV file generated from server.
Returns: dictionary data format for pandas.
"""
dataFormat = {
"Serial_Number": pandas.StringDtype(),
"LogTime0": pandas.StringDtype(), # @todo force rename
"Id0": pandas.StringDtype(), # @todo force rename
"DriveId": pandas.StringDtype(),
"JobRunId": pandas.StringDtype(),
"LogTime1": pandas.StringDtype(), # @todo force rename
"Comment0": pandas.StringDtype(), # @todo force rename
"CriticalWarning": pandas.StringDtype(),
"Temperature": pandas.StringDtype(),
"AvailableSpare": pandas.StringDtype(),
"AvailableSpareThreshold": pandas.StringDtype(),
"PercentageUsed": pandas.StringDtype(),
"DataUnitsReadL": pandas.StringDtype(),
"DataUnitsReadU": pandas.StringDtype(),
"DataUnitsWrittenL": pandas.StringDtype(),
"DataUnitsWrittenU": pandas.StringDtype(),
"HostReadCommandsL": pandas.StringDtype(),
"HostReadCommandsU": pandas.StringDtype(),
"HostWriteCommandsL": pandas.StringDtype(),
"HostWriteCommandsU": pandas.StringDtype(),
"ControllerBusyTimeL": pandas.StringDtype(),
"ControllerBusyTimeU": pandas.StringDtype(),
"PowerCyclesL": pandas.StringDtype(),
"PowerCyclesU": pandas.StringDtype(),
"PowerOnHoursL": pandas.StringDtype(),
"PowerOnHoursU": pandas.StringDtype(),
"UnsafeShutdownsL": pandas.StringDtype(),
"UnsafeShutdownsU": pandas.StringDtype(),
"MediaErrorsL": pandas.StringDtype(),
"MediaErrorsU": pandas.StringDtype(),
"NumErrorInfoLogsL": pandas.StringDtype(),
"NumErrorInfoLogsU": pandas.StringDtype(),
"ProgramFailCountN": pandas.StringDtype(),
"ProgramFailCountR": pandas.StringDtype(),
"EraseFailCountN": pandas.StringDtype(),
"EraseFailCountR": pandas.StringDtype(),
"WearLevelingCountN": pandas.StringDtype(),
"WearLevelingCountR": pandas.StringDtype(),
"E2EErrorDetectCountN": pandas.StringDtype(),
"E2EErrorDetectCountR": pandas.StringDtype(),
"CRCErrorCountN": pandas.StringDtype(),
"CRCErrorCountR": pandas.StringDtype(),
"MediaWearPercentageN": pandas.StringDtype(),
"MediaWearPercentageR": pandas.StringDtype(),
"HostReadsN": pandas.StringDtype(),
"HostReadsR": pandas.StringDtype(),
"TimedWorkloadN": pandas.StringDtype(),
"TimedWorkloadR": pandas.StringDtype(),
"ThermalThrottleStatusN": pandas.StringDtype(),
"ThermalThrottleStatusR": pandas.StringDtype(),
"RetryBuffOverflowCountN": pandas.StringDtype(),
"RetryBuffOverflowCountR": pandas.StringDtype(),
"PLLLockLossCounterN": pandas.StringDtype(),
"PLLLockLossCounterR": pandas.StringDtype(),
"NandBytesWrittenN": pandas.StringDtype(),
"NandBytesWrittenR": pandas.StringDtype(),
"HostBytesWrittenN": pandas.StringDtype(),
"HostBytesWrittenR": pandas.StringDtype(),
"SystemAreaLifeRemainingN": pandas.StringDtype(),
"SystemAreaLifeRemainingR": pandas.StringDtype(),
"RelocatableSectorCountN": pandas.StringDtype(),
"RelocatableSectorCountR": pandas.StringDtype(),
"SoftECCErrorRateN": pandas.StringDtype(),
"SoftECCErrorRateR": pandas.StringDtype(),
"UnexpectedPowerLossN": pandas.StringDtype(),
"UnexpectedPowerLossR": pandas.StringDtype(),
"MediaErrorCountN": pandas.StringDtype(),
"MediaErrorCountR": pandas.StringDtype(),
"NandBytesReadN": pandas.StringDtype(),
"NandBytesReadR": pandas.StringDtype(),
"WarningCompTempTime": pandas.StringDtype(),
"CriticalCompTempTime": pandas.StringDtype(),
"TempSensor1": pandas.StringDtype(),
"TempSensor2": pandas.StringDtype(),
"TempSensor3": pandas.StringDtype(),
"TempSensor4": pandas.StringDtype(),
"TempSensor5": pandas.StringDtype(),
"TempSensor6": pandas.StringDtype(),
"TempSensor7": pandas.StringDtype(),
"TempSensor8": pandas.StringDtype(),
"ThermalManagementTemp1TransitionCount": pandas.StringDtype(),
"ThermalManagementTemp2TransitionCount": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp1": pandas.StringDtype(),
"TotalTimeForThermalManagementTemp2": pandas.StringDtype(),
"Core_Num": pandas.StringDtype(),
"Id1": pandas.StringDtype(), # @todo force rename
"Job_Run_Id": pandas.StringDtype(),
"Stats_Time": pandas.StringDtype(),
"HostReads": pandas.StringDtype(),
"HostWrites": pandas.StringDtype(),
"NandReads": pandas.StringDtype(),
"NandWrites": pandas.StringDtype(),
"ProgramErrors": pandas.StringDtype(),
"EraseErrors": pandas.StringDtype(),
"ErrorCount": pandas.StringDtype(),
"BitErrorsHost1": pandas.StringDtype(),
"BitErrorsHost2": pandas.StringDtype(),
"BitErrorsHost3": pandas.StringDtype(),
"BitErrorsHost4": pandas.StringDtype(),
"BitErrorsHost5": pandas.StringDtype(),
"BitErrorsHost6": pandas.StringDtype(),
"BitErrorsHost7": pandas.StringDtype(),
"BitErrorsHost8": pandas.StringDtype(),
"BitErrorsHost9": pandas.StringDtype(),
"BitErrorsHost10": pandas.StringDtype(),
"BitErrorsHost11": pandas.StringDtype(),
"BitErrorsHost12": pandas.StringDtype(),
"BitErrorsHost13": pandas.StringDtype(),
"BitErrorsHost14": pandas.StringDtype(),
"BitErrorsHost15": pandas.StringDtype(),
"ECCFail": pandas.StringDtype(),
"GrownDefects": pandas.StringDtype(),
"FreeMemory": pandas.StringDtype(),
"WriteAllowance": pandas.StringDtype(),
"ModelString": pandas.StringDtype(),
"ValidBlocks": pandas.StringDtype(),
"TokenBlocks": pandas.StringDtype(),
"SpuriousPFCount": pandas.StringDtype(),
"SpuriousPFLocations1": pandas.StringDtype(),
"SpuriousPFLocations2": pandas.StringDtype(),
"SpuriousPFLocations3": pandas.StringDtype(),
"SpuriousPFLocations4": pandas.StringDtype(),
"SpuriousPFLocations5": pandas.StringDtype(),
"SpuriousPFLocations6": pandas.StringDtype(),
"SpuriousPFLocations7": pandas.StringDtype(),
"SpuriousPFLocations8": | pandas.StringDtype() | pandas.StringDtype |
import pandas as pd
from gurobipy import *
import gurobipy as grb
import matplotlib.pyplot as plt
import numpy as np
from ModelSetUp import *
from CurrModelPara import *
from Curve import *
#from Main_cal_opt import find_optimal_value
class RL_Kernel():
def __init__(self):
#self.reward = None
#self.value = None
#self.action = None
self.alpha = 0.8#0.2
self.date = 'March 07 2019'
self.LAC_last_windows = 0#1#0
self.probabilistic = 1#0#1
self.RT_DA = 1#0#1
self.curr_time = 0
self.curr_scenario = 1
self.current_stage ='training_500'
def main_function(self):
self.Curr_Scenario_Cost_Total = []
self.start = 1
self.end = 2
for curr_scenario in range(self.start, self.end):
self.PSH_Results = []
self.SOC_Results = []
self.curr_scenario_cost_total = 0
for i in range(0, 23):
self.curr_time = i
self.curr_scenario = curr_scenario
self.calculate_optimal_soc()
self.get_final_curve_main()
self.output_psh_soc()
self.output_psh_soc_main()
self.output_curr_cost()
def output_curr_cost(self):
# output the psh and soc
filename = './Output_Curve' + '/PSH_Profitmax_Rolling_Results_' + 'total' + '_' + self.date + '.csv'
self.df_total.to_csv(filename)
# output curr_cost
filename = './Output_Curve' + '/Current_Cost_Total_Results_' + str(
self.curr_scenario) + '_' + self.date + '.csv'
self.df = pd.DataFrame({'Curr_Scenario_Cost_Total': self.Curr_Scenario_Cost_Total})
self.df.to_csv(filename)
def output_psh_soc_main(self):
# add the last one
filename = './Output_Curve' + '/PSH_Profitmax_Rolling_Results_' + str(
self.curr_scenario) + '_' + self.date + '.csv'
if self.SOC_Results[-1] - self.e_system.parameter['EEnd'][0] > 0.1:
self.PSH_Results.append(
(self.SOC_Results[-1] - self.e_system.parameter['EEnd'][0]) * self.psh_system.parameter['GenEfficiency'][0])
else:
self.PSH_Results.append(
(self.SOC_Results[-1] - self.e_system.parameter['EEnd'][0]) / self.psh_system.parameter['PumpEfficiency'][0])
self.SOC_Results.append(self.e_system.parameter['EEnd'][0])
self.df = pd.DataFrame(
{'SOC_Results_' + str(self.curr_scenario): self.SOC_Results, 'PSH_Results_' + str(self.curr_scenario): self.PSH_Results})
# df = pd.DataFrame({'PSH_Results_' + str(curr_scenario): PSH_Results})
# df.to_csv(filename)
if self.curr_scenario == self.start:
self.df_total = self.df
else:
self.df_total = pd.concat([self.df_total, self.df], axis=1)
##calculate total cost
self.Curr_Scenario_Cost_Total.append(self.curr_scenario_cost_total)
def output_psh_soc(self):
self.SOC_Results.append(self.curr_model.optimal_soc_sum)
if self.curr_model.optimal_psh_gen_sum > 1:
self.PSH_Results.append(self.curr_model.optimal_psh_gen_sum)
else:
self.PSH_Results.append(-self.curr_model.optimal_psh_pump_sum)
##output curr cost
self.curr_scenario_cost_total += self.curr_model.curr_cost
#
def calculate_optimal_soc(self):
self.curr_model_para = CurrModelPara(self.LAC_last_windows, self.probabilistic, self.RT_DA, self.date, self.curr_time, self.curr_scenario, self.current_stage)
# LAC_last_windows, probabilistic, RT_DA, date, LAC_bhour, scenario
print('##############################' + 'scenario = ' + str(self.curr_scenario) + ', and curr_time = ' + str(self.curr_time) + '######################################')
print('################################## psh_system set up ##################################')
self.psh_system = PshSystem(self.curr_model_para)
self.psh_system.set_up_parameter()
print(self.psh_system.parameter)
print('################################## e_system set up ##################################')
self.e_system = ESystem(self.curr_model_para)
self.e_system.set_up_parameter()
print(self.e_system.parameter)
print('################################## lmp_system set up ##################################')
self.lmp = LMP(self.curr_model_para)
self.lmp.set_up_parameter()
#print(self.lmp.date)
print('lmp_quantiles=', self.lmp.lmp_quantiles)
print('lmp_scenarios=', self.lmp.lmp_scenarios)
print('lmp_Nlmp_s=', self.lmp.Nlmp_s)
print('################################## curve set up ##################################')
self.old_curve = Curve(100, 0, 3000)
self.old_curve.input_curve(self.curr_time, self.curr_scenario - 1)
print(self.old_curve.segments)
print('################################## ADP training model set up ##################################')
model_1 = Model('DAMarket')
self.curr_model = RLSetUp(self.psh_system, self.e_system, self.lmp, self.old_curve, self.curr_model_para, model_1)
self.curr_model.optimization_model()
self.optimal_soc_sum = self.curr_model.optimal_soc_sum
self.optimal_psh_gen_sum = self.curr_model.optimal_psh_gen_sum
self.optimal_psh_pump_sum = self.curr_model.optimal_psh_pump_sum
print(self.curr_model.optimal_soc_sum)
def calculate_new_soc(self, initial_soc):
pre_model = CurrModelPara(self.LAC_last_windows, self.probabilistic, self.RT_DA, self.date, self.curr_time,
self.curr_scenario, self.current_stage)
# LAC_last_windows, probabilistic, RT_DA, date, LAC_bhour, scenario
psh_system_2 = PshSystem(pre_model)
psh_system_2.set_up_parameter()
e_system_2 = ESystem(pre_model)
e_system_2.set_up_parameter()
e_system_2.parameter['EStart'] = initial_soc
print('e_system_2.parameter is ' + str(e_system_2.parameter))
if self.curr_time != 22:
# lmp, time = t+1, scenario= n
self.prev_model = CurrModelPara(self.LAC_last_windows, self.probabilistic, self.RT_DA, self.date, self.curr_time + 1,
self.curr_scenario, self.current_stage)
self.prev_lmp = LMP(self.prev_model)
self.prev_lmp.set_up_parameter()
# curve, time = t+1, scenario= n-1
self.pre_curve = Curve(100, 0, 3000)
self.pre_curve.input_curve(self.curr_time + 1, self.curr_scenario - 1)
elif self.curr_time == 22:
self.prev_model = CurrModelPara(self.LAC_last_windows, self.probabilistic, self.RT_DA, self.date, self.curr_time,
self.curr_scenario, self.current_stage)
self.prev_lmp = LMP(self.prev_model)
self.prev_lmp.set_up_parameter()
self.pre_curve = Curve(100, 0, 3000)
self.pre_curve.input_curve(self.curr_time, self.curr_scenario - 1)
model_1 = Model('DAMarket')
#ADP_train_model_para = pre_model
a = self.prev_lmp.lmp_scenarios
print(a)
b = self.pre_curve.point_Y
print(b)
pre_model = RLSetUp(psh_system_2, e_system_2, self.prev_lmp, self.pre_curve, pre_model, model_1)
pre_model.optimization_model_with_input()
rt = pre_model.optimal_profit
return rt
#after we get the current self.optimal_profit and self.optimal_soc_sum, we have to update the curve
def get_final_curve_main(self):
        self.get_new_curve_step_1()  # build the candidate curve from this iteration's optimal solution
print(self.curve.segments)
        self.get_new_curve_step_2_curve_comb()  # (1 - alpha) * old_curve + alpha * second_curve
print(self.second_curve_slope)
# new curve: self.new_curve_slope
self.get_new_curve_step_3_two_pts() # update the new curve with the two new points
# new points: self.update_point_1 and self.update_point_2
self.curve.curve_update(self.new_curve_slope, self.update_point_1, self.update_point_2)
print(self.curve.segments)
self.output_curve()
self.output_curve_sum()
def get_new_curve_step_1(self):
#how can we get each new curve_point_X
self.curve = self.old_curve
self.second_curve_soc = self.curve.point_X
#get new curve_profit
self.second_curve_profit = []
beta = 0.001
# make sure its terminal soc works
self.check_soc_curve = []
#here need parallel
for value in self.second_curve_soc:
distance = value - float(self.e_system.parameter['EEnd'])
left_cod = distance <= 0 and (abs(distance) < (23 - self.curr_time) * float(self.psh_system.parameter['PumpMax']) * (float(self.psh_system.parameter['PumpEfficiency'])-beta) )
right_cod = distance > 0 and (abs(distance) < (23 - self.curr_time) * float(self.psh_system.parameter['GenMax']) / (float(self.psh_system.parameter['GenEfficiency'])+beta) )
# left_value = (value - float(self.e_system.parameter['EEnd'])) - (
# (23 - self.curr_time) * float(self.psh_system.parameter['GenMax']) / (
# float(self.psh_system.parameter['GenEfficiency']) + beta))
# right_value = (value - float(self.e_system.parameter['EEnd'])) - (
# -(23 - self.curr_time) * float(self.psh_system.parameter['PumpMax']) * (
# float(self.psh_system.parameter['PumpEfficiency']) - beta))
if left_cod or right_cod:
#if left_value < 0 and right_value > 0:
point_y = self.calculate_new_soc(value)
check = 1
else:
#point_y = 0
point_y = -1000000 #self.calculate_pts(value)
check = 0
#FIND the left and right point of using cal_new_soc
self.second_curve_profit.append(point_y)
self.check_soc_curve.append(check)
#find the boundary point
self.left = 0
self.right = len(self.check_soc_curve) - 1
for item in range(len(self.check_soc_curve)):
if self.check_soc_curve[0] == 1:
self.left = 0
elif item != len(self.check_soc_curve)-1 and (self.check_soc_curve[item] == 0 and self.check_soc_curve[item + 1] == 1):
self.left = item + 1
elif item != len(self.check_soc_curve)-1 and (self.check_soc_curve[item] == 1 and self.check_soc_curve[item + 1] == 0):
self.right = item
elif item == len(self.check_soc_curve)-1 and self.check_soc_curve[item] == 1:
self.right = item
#get new curve_slope
self.second_curve_slope = [self.old_curve.intial_slope_set]
for index in range(1, len(self.second_curve_soc)):
temp_slop = (self.second_curve_profit[index] - self.second_curve_profit[index - 1])/self.curve.steps
self.second_curve_slope.append(temp_slop)
#change the first back
#self.second_curve_slope[0] = self.second_curve_slope.intial_slope_set
#make sure it is convex
for i in range(len(self.second_curve_slope)):
if i < self.left + 1:
self.second_curve_slope[i] = 10000 #self.second_curve_slope[self.left + 1]
#self.old_curve.point_Y[i] = 10000
elif i == self.left:
                self.second_curve_slope[i] = self.second_curve_slope[self.left + 1]
elif i > self.right:
self.second_curve_slope[i] = -10000 #self.second_curve_slope[self.right]
#self.old_curve.point_Y[i] = 10000
print(self.second_curve_slope)
# _cur = len(self.second_curve_slope) - i - 1
# if self.check_soc_curve[i] == 0:
# if _cur != 0 and self.second_curve_slope[_cur] > self.second_curve_slope[_cur-1] and self.second_curve_slope[_cur] < self.old_curve.intial_slope_set:
# self.second_curve_slope[_cur - 1] = self.second_curve_slope[_cur]
# elif _cur != 0 and self.second_curve_slope[_cur] > self.second_curve_slope[_cur-1] and self.second_curve_slope[_cur] > self.old_curve.intial_slope_set:
# self.second_curve_slope[_cur] = self.old_curve.intial_slope_set
# self.second_curve_slope[_cur - 1] = self.old_curve.intial_slope_set
def get_new_curve_step_2_curve_comb(self):
#new curve combine with the old_slope
self.new_curve_slope = []
for i in range(len(self.second_curve_soc)):
_temp = (1 - self.alpha)*self.old_curve.point_Y[i] + self.alpha*self.second_curve_slope[i]
self.new_curve_slope.append(_temp) #this is the new slope we need
print(self.new_curve_slope)
def get_new_curve_step_3_two_pts(self):
#need find another point #be careful boundary case
# get second point
# get second point profit
if self.optimal_soc_sum + 1 > self.curve.up_bd:
self.second_point_soc_sum = self.optimal_soc_sum - 1#self.curve.steps
self.second_point_profit = self.calculate_new_soc(self.second_point_soc_sum)
else:
self.second_point_soc_sum = self.optimal_soc_sum + 1 #self.curve.steps
self.second_point_profit = self.calculate_new_soc(self.second_point_soc_sum)
# get previous point profit
        if self.optimal_soc_sum - 1 < self.curve.lo_bd:  # lower-bound check (mirrors the branch used in step 3)
self.previous_point_soc_sum = self.optimal_soc_sum + 1 #self.curve.steps
self.previous_point_profit = self.calculate_new_soc(self.previous_point_soc_sum)
else:
self.previous_point_soc_sum = self.optimal_soc_sum - 1 #self.curve.steps
self.previous_point_profit = self.calculate_new_soc(self.previous_point_soc_sum)
# shall we get the optimal at previous???
self.pre_scen_optimal_profit = self.calculate_new_soc(self.optimal_soc_sum)
#calcuate self.update_point_1/2(point_x, point_curve)
if self.optimal_soc_sum + 1 > self.curve.up_bd:
# self.optimal_profit and self.optimal_soc_sum
self.update_point_1_x = self.optimal_soc_sum
self.update_point_1_y = (self.pre_scen_optimal_profit - self.previous_point_profit) #self.curve.steps
#
self.update_point_2_x = self.optimal_soc_sum
self.update_point_2_y = (self.pre_scen_optimal_profit - self.previous_point_profit) #self.curve.steps
elif self.optimal_soc_sum - 1 < self.curve.lo_bd:
#self.optimal_profit and self.optimal_soc_sum
self.update_point_1_x = self.optimal_soc_sum
self.update_point_1_y = (self.second_point_profit - self.pre_scen_optimal_profit) #self.curve.steps
            ## possibly wrong here: should the previous point be updated, or this one?
self.update_point_2_x = self.optimal_soc_sum
self.update_point_2_y = (self.second_point_profit - self.pre_scen_optimal_profit) #self.curve.steps
else:
self.update_point_1_x = self.optimal_soc_sum
self.update_point_1_y = (self.pre_scen_optimal_profit - self.previous_point_profit) #self.curve.steps
self.update_point_2_x = self.second_point_soc_sum
self.update_point_2_y = (self.second_point_profit - self.pre_scen_optimal_profit) #self.curve.steps
self.update_point_1 = [self.update_point_1_x, self.update_point_1_y]
self.update_point_2 = [self.update_point_2_x, self.update_point_2_y]
def output_curve(self):
#output the curve
scenario = self.curr_scenario
filename = self.e_system.e_start_folder + '/Curve_' + 'time_' + str(self.curr_model_para.LAC_bhour) + '_scenario_' + str(scenario) + '.csv'
df = pd.DataFrame(self.curve.segments, columns =['soc_segment','slope'])
df.to_csv(filename, index=False, header=True)
def output_curve_sum(self):
#input the original
curr_time = self.curr_model_para.LAC_bhour
scenario = self.curr_model_para.scenario
if scenario == 1:
filename = self.e_system.e_start_folder + '/Curve_' + 'time_' + str(curr_time) + '_scenario_' + str(scenario) + '.csv'
df = pd.read_csv(filename)
else:
filename = self.e_system.e_start_folder + '/Curve_total_' + 'time_' + str(self.curr_model_para.LAC_bhour) + '.csv'
df = | pd.read_csv(filename) | pandas.read_csv |
# Imports
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import io
import csv
import urllib.request
from urllib.error import HTTPError
from os.path import dirname, join
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.holtwinters import SimpleExpSmoothing, ExponentialSmoothing
from statsmodels.tsa.stattools import adfuller
from com.chaquo.python import Python
# A custom Exception to raise and return to the app
# When an Error Exception is raised, the message will appear as
# "Error: <message>" to the user
# Instead of raise a particular Exception, raise an Error
# with a message specific to that exception
class Error(Exception):
pass
# Function to read in a dataset at a specified url and save it locally
def read_dataset(url):
# Variable to store the dataset at the specified URL
dataset = None
# Check if file at url has a header
try:
has_header = check_for_header(url)
except UnicodeDecodeError:
raise Error('Invalid file type. If the file is encoded, it should be encoded with utf-8. '
'Compressed files, such as a .zip file, are not accepted.')
except Exception:
raise Error('An unknown error occurred when attempting to read the dataset from the URL. '
'Please try again, or use a different URL.')
# has_header could be an error message
# If so, return it without proceeding
if isinstance(has_header, str):
return has_header
# Handle exceptions when reading the dataset
try:
if has_header:
# If there is a header, it will be inferred
dataset = pd.read_csv(url, skip_blank_lines=True, encoding='utf_8')
else:
# If there is no header, prefix 'Column ' to each column heading
dataset = pd.read_csv(url, header=None, prefix='Column ', skip_blank_lines=True,
encoding='utf_8')
except Exception:
raise Error('An unknown error occurred when reading from the URL. '
'Please ensure the URL is correct and try again.')
finally:
try:
# Assert that a dataset was retrieved
assert dataset is not None
assert isinstance(dataset, pd.DataFrame)
assert not dataset.empty
except AssertionError:
raise Error('The file located at the URL provided is either not a .csv, or is empty. '
'Please try again with a different URL.')
# ***Code referenced from
# ***https://www.youtube.com/watch?v=sm02Q91ujfs&list=PLeOtHc_su2eXZuiqCH4pBgV6vamBbP88K&index=7
# Get the local file directory for the application
files_dir = str(Python.getPlatform().getApplication().getFilesDir())
# Join 'dataset.csv' to the directory path to form the full file path
file_name = join(dirname(files_dir), 'dataset.csv')
# ***End reference
# Save the dataset to the directory as 'dataset.csv'
dataset.to_csv(file_name, index=False)
# Return the path to the file
return file_name
# Function to generate a preview of a dataset in a matplotlib table and
# return the plot as a byte array
def step_one(file_name):
try:
# Read the local copy of the dataset
dataset = pd.read_csv(file_name, skip_blank_lines=True)
except FileNotFoundError:
raise Error('Local copy of dataset could not be read.')
# Check if it is a DataFrame
if not isinstance(dataset, pd.DataFrame):
raise Error('Local copy of file is not a dataset. '
'Please try again with a different URL.')
nrows = 5 # Number of rows to display
ncols = len(dataset.columns) # Number of columns in the dataset
# Display the first few rows as a preview
ds_head = dataset.head(nrows)
# Create the plot, figure size is (number of columns * 3.5 wide, number of rows high)
fig, ax = plt.subplots(figsize=(ncols * 3.5, nrows))
# Plot the table
ds_table = ax.table(
cellText=ds_head.values, # The contents of each cell
rowLabels=ds_head.index, # The label for each row (index)
colLabels=ds_head.columns, # The label for each column (header)
loc='center' # The location of the table in the figure
)
fontsize = 40 # Font size of the table cells
ds_table.set_fontsize(fontsize)
# Scale the table to increase row height (column width handled below)
ds_table.scale(1, 4)
# Set auto column width and font size to reduce white space
ds_table.auto_set_column_width(np.arange(len(ds_head.columns)))
ds_table.auto_set_font_size(True)
# Disable original plot axes
ax.set_axis_off()
# Create an IO buffer
io_buff = io.BytesIO()
# Save the plot to the buffer in png format
plt.savefig(io_buff, format="png")
# Return the contents of the buffer
buffer = io_buff.getvalue()
return buffer
# Function to determine if a csv file has a header
def check_for_header(url):
# Write contents of url to a file (temp.csv)
temp_filename = join(dirname(__file__), 'temp.csv')
# Use urlretrieve to write the contents of the url to a file (temp.csv)
try:
urllib.request.urlretrieve(url, temp_filename)
except HTTPError as httpe:
# Check for a 404 error, otherwise return a general message
if httpe.code == 404:
raise Error('A file could not be found at the specified URL. '
'Please ensure the URL is correct and try again')
else:
raise Error('A HTTP ' + str(httpe.code) + ' error occurred.')
except UnicodeDecodeError:
raise Error('Invalid file type. If the file is encoded, it should be encoded with utf-8. '
'Compressed files, such as a .zip file, are not accepted.')
# Open the local file
try:
with open(temp_filename) as file:
        # Read the first 2048 characters to determine whether the first row is consistent with
# subsequent rows (i.e. string vs. integer)
try:
header = csv.Sniffer().has_header(file.read(2048))
except OSError:
raise Error('A header could not be determined. '
'Please try again with a different dataset.')
except OSError:
raise Error('An error occurred when attempting to discover a header in the dataset. '
'Please try again with a different URL')
# Return a boolean value which represents whether the dataset has a header or not
return header
# Function to generate a summary of the dataset
def dataset_summary(file_name):
try:
# Read the local copy of the dataset
dataset = pd.read_csv(file_name, skip_blank_lines=True)
except FileNotFoundError:
raise Error('Local copy of dataset could not be read.')
preview_rows = 5 # Number of rows to preview
nrows = dataset.shape[0] # Number of rows in the dataset
ncols = dataset.shape[1] # Number of columns in the dataset
# Create and return summary String
summary = ('The dataset has ' + str(nrows) + ' rows and ' +
str(ncols) + ' columns. ' +
'Above is a preview of the first ' + str(preview_rows) + ' rows.')
return summary
# Function to return the names of a dataset's columns
def get_column_names(file_name):
try:
# Read the local copy of the dataset
dataset = pd.read_csv(file_name, skip_blank_lines=True)
except FileNotFoundError:
raise Error('Local copy of dataset could not be read.')
# Return the column names as a list
return list(dataset.columns.values)
# Function to generate a DataFrame using just user specified columns and save it as a file
def read_model_data(file_name, xlabel='x-axis', ylabel='y-axis', first_last='', row_limit=''):
try:
# Read the local copy of the dataset
dataset = pd.read_csv(file_name, skip_blank_lines=True)
except FileNotFoundError:
raise Error('Local copy of dataset could not be read.')
# Check whether the user wishes to limit the number of rows
if first_last != '' and row_limit != '' and row_limit is not None:
rl = abs(int(row_limit))
# Depending on whether First or Last was chosen, slice the dataset accordingly
if first_last == 'First':
limited_dataset = dataset[:rl]
else:
limited_dataset = dataset[-rl:]
# Create the model_data DataFrame with the limited number of rows
model_data = pd.DataFrame(limited_dataset[ylabel])
model_data.index = limited_dataset[xlabel]
else:
# Create the model_data DataFrame with all the rows
model_data = pd.DataFrame(dataset[ylabel])
model_data.index = dataset[xlabel]
# ***Code referenced from
# ***https://www.youtube.com/watch?v=sm02Q91ujfs&list=PLeOtHc_su2eXZuiqCH4pBgV6vamBbP88K&index=7
# Get the local file directory for the application
files_dir = str(Python.getPlatform().getApplication().getFilesDir())
# Join 'model_data.csv' to the directory path to form the full file path
file_name = join(dirname(files_dir), 'model_data.csv')
# ***End reference
# Save the dataset to the directory as 'dataset.csv'
model_data.to_csv(file_name, index=True)
# Return the path to the file
return file_name
# Function to return a dataset file as a DataFrame
def get_dataframe(file_name, xlabel='x-axis', ylabel='y-label'):
# Read the file
dataset = pd.read_csv(file_name, skip_blank_lines=True, header=0)
# Limit the columns to those set by the user
dataset_df = pd.DataFrame(dataset[ylabel])
dataset_df.index = dataset[xlabel]
# Return the DataFrame
return dataset_df
# Function to plot a Dataframe
def dataframe_plot(model_data, graph_choice, xlabel='x-axis', ylabel='y-axis',
title='Title of Line Graph'):
# Get the number of rows in the DataFrame
nrows = model_data.shape[0]
# List of places to display the labels for pie charts
# Display a label every 10% of the dataset's length
pie_labels = [
int(nrows * 0.1),
int(nrows * 0.2),
int(nrows * 0.3),
int(nrows * 0.4),
int(nrows * 0.5),
int(nrows * 0.6),
int(nrows * 0.7),
int(nrows * 0.8),
int(nrows * 0.9),
int(nrows),
]
# Set the font size for every future plot
plt.rcParams['font.size'] = '20'
# Set the plot size to be big enough to see
fig, ax = plt.subplots(figsize=(11, 16))
# Set axis labels and title
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.title(title)
if graph_choice != 'Horizontal Bar Chart':
# Limit the number of x-ticks displayed to 10
# ***Code referenced from
# ***https://www.delftstack.com/howto/matplotlib/matplotlib-set-number-of-ticks/
ax.xaxis.set_major_locator(MaxNLocator(10))
# ***End reference
# Rotate the x-axis values by 45 degrees
plt.xticks(rotation=45)
else:
# Limit the number of y-ticks displayed to 10
# ***Code referenced from
# ***https://www.delftstack.com/howto/matplotlib/matplotlib-set-number-of-ticks/
ax.yaxis.set_major_locator(MaxNLocator(10))
# ***End reference
# Catch incorrect data type exceptions when trying to plot data
try:
if graph_choice == 'Line Graph':
ax.plot(model_data, linewidth=2)
elif graph_choice == 'Bar Chart':
ax.bar(model_data.index.values, model_data[ylabel])
elif graph_choice == 'Pie Chart':
# ***Code referenced from
# ***https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.pie.html
# ***https://matplotlib.org/stable/api/text_api.html#matplotlib.text.Text
patches, texts = ax.pie(model_data[ylabel], labels=model_data.index.values,
startangle=90, counterclock=False)
            for i, t in enumerate(texts):
                t.set_visible(i in pie_labels)
            texts[0].set_visible(True)
# ***End reference
# No labels for pie chart
ax.set_xlabel('')
ax.set_ylabel('')
elif graph_choice == 'Horizontal Bar Chart':
ax.barh(model_data.index.values, model_data[ylabel])
# Opposite labels for horizontal bar chart
ax.set_xlabel(ylabel)
ax.set_ylabel(xlabel)
except TypeError:
raise Error('Invalid column choices. The plot could not be displayed')
# Display plot
plt.show()
# Create an IO buffer
io_buff = io.BytesIO()
# Save the plot to the buffer in png format
plt.savefig(io_buff, format="png")
# Return the contents of the buffer
buffer = io_buff.getvalue()
return buffer
# Function to return the number of rows in the a dataset
def model_rows(file_name):
# Read the file
model_data = pd.read_csv(file_name, skip_blank_lines=True, header=0)
# Return the number of rows as a String
nrows = model_data.shape[0]
return str(nrows)
# Function to test whether graphing the selected columns is appropriate
def test_plot(file_name, graph_choice, xlabel='x-axis', ylabel='y-axis'):
# Read in the dataset
dataset = pd.read_csv(file_name, skip_blank_lines=True, header=0)
# Set up the DataFrame according to user preference
test_data = pd.DataFrame(dataset[ylabel])
test_data.index = dataset[xlabel]
# Plot in a small window (will not be seen)
fig, ax = plt.subplots(figsize=(1, 1))
# Catch incorrect data type exceptions when trying to plot data
try:
if graph_choice == 'Line Graph':
ax.plot(test_data)
elif graph_choice == 'Bar Chart':
ax.bar(test_data.index.values, test_data[ylabel])
elif graph_choice == 'Pie Chart':
ax.pie(test_data[ylabel])
elif graph_choice == 'Horizontal Bar Chart':
ax.barh(test_data.index.values, test_data[ylabel])
except (TypeError, ValueError):
raise Error('Unsuitable columns have been selected for graphing. Please try again.')
except Exception:
raise Error(
'An unknown error has occurred when attempting to plot the data. Please try again.')
plt.show()
# Function to replace the index of a dataset with a suitable pandas Index
def replace_index(data, index_name='Index'):
# If the index is made up of integers, convert to a RangeIndex
if data.index.dtype == 'int64':
# Assuming the data is not missing any steps
index_step = data.index[1] - data.index[0]
        data.index = pd.RangeIndex(start=data.index[0], stop=data.index[-1] + index_step,
                                   step=index_step)
else:
try:
# Assuming a non-integer index contains dates, attempt to convert to DatetimeIndex
data.index = | pd.DatetimeIndex(data=data.index.values, freq='infer') | pandas.DatetimeIndex |
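# Minimal sketch (toy data) of the two conversions replace_index performs: an
# evenly stepped integer index becomes a RangeIndex, and a date-like index
# becomes a DatetimeIndex with an inferred frequency.
def example_replace_index_conversions():
    import pandas as pd
    int_indexed = pd.DataFrame({'value': [10, 20, 30]}, index=[0, 2, 4])
    int_indexed.index = pd.RangeIndex(start=0, stop=6, step=2)
    date_indexed = pd.DataFrame({'value': [1, 2, 3]},
                                index=['2021-01-01', '2021-01-02', '2021-01-03'])
    date_indexed.index = pd.DatetimeIndex(data=date_indexed.index.values, freq='infer')
    return int_indexed, date_indexed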
import pandas as pd
df_preds = pd.read_pickle("pickled_predictions4.df")
print(df_preds.columns)
df_mcs = pd.read_pickle("pickled_panda6.df")
print(df_mcs.columns)
df = pd.concat([df_preds,df_mcs], ignore_index=True)
df2= | pd.DataFrame() | pandas.DataFrame |
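# Small illustration of the ignore_index=True behaviour relied on above: the
# original row labels are dropped and a fresh 0..n-1 RangeIndex is created.
import pandas as pd
_a = pd.DataFrame({'score': [0.1, 0.2]}, index=[10, 11])
_b = pd.DataFrame({'score': [0.3]}, index=[10])
_stacked = pd.concat([_a, _b], ignore_index=True)
print(_stacked.index.tolist())  # [0, 1, 2]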
import pymongo
import pandas as pd
import datetime
import sys
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
DBserver = pymongo.MongoClient('mongodb://localhost:27017/')
DB = DBserver['ir']
collection = DB['usermsgs']
output = []
df = | pd.DataFrame(columns=['name', 'time_slot', 'mode', 'type', 'quiz_number', 'action_number']) | pandas.DataFrame |
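# Sketch of one way the cursor could be flattened into rows for the frame
# above; it assumes the documents expose fields with the same names as the
# columns, which is not taken from the real 'usermsgs' schema.
for doc in collection.find():
    output.append({
        'name': doc.get('name'),
        'time_slot': doc.get('time_slot'),
        'mode': doc.get('mode'),
        'type': doc.get('type'),
        'quiz_number': doc.get('quiz_number'),
        'action_number': doc.get('action_number'),
    })
df = pd.DataFrame(output, columns=['name', 'time_slot', 'mode', 'type',
                                   'quiz_number', 'action_number'])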
# -*- coding: utf-8 -*-
from datetime import timedelta
import operator
from string import ascii_lowercase
import warnings
import numpy as np
import pytest
from pandas.compat import lrange
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Categorical, DataFrame, MultiIndex, Series, Timestamp, date_range, isna,
notna, to_datetime, to_timedelta)
import pandas.core.algorithms as algorithms
import pandas.core.nanops as nanops
import pandas.util.testing as tm
def assert_stat_op_calc(opname, alternative, frame, has_skipna=True,
check_dtype=True, check_dates=False,
check_less_precise=False, skipna_alternative=None):
"""
Check that operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
check_dtype : bool, default True
Whether the dtypes of the result of "frame.opname()" and
"alternative(frame)" should be checked.
check_dates : bool, default false
Whether opname should be tested on a Datetime Series
check_less_precise : bool, default False
Whether results should only be compared approximately;
passed on to tm.assert_series_equal
skipna_alternative : function, default None
NaN-safe version of alternative
"""
f = getattr(frame, opname)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
result = getattr(df, opname)()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, opname)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if opname in ['sum', 'prod']:
expected = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, expected, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname in ['sum', 'prod']:
unit = 1 if opname == 'prod' else 0 # result for empty sum/prod
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
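# Minimal sketch of how the helper above is typically invoked (the real calls
# appear further down in this module); the tiny frame here is illustrative only.
def _example_assert_stat_op_calc():
    frame = DataFrame({'a': [1.0, 2.0, 3.0], 'b': [4.0, 5.0, 6.0]})
    assert_stat_op_calc('mean', np.mean, frame)
    assert_stat_op_calc('sum', np.sum, frame, skipna_alternative=np.nansum)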
def assert_stat_op_api(opname, float_frame, float_string_frame,
has_numeric_only=False):
"""
Check that API for operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
float_frame : DataFrame
DataFrame with columns of type float
float_string_frame : DataFrame
DataFrame with both float and string columns
has_numeric_only : bool, default False
Whether the method "opname" has the kwarg "numeric_only"
"""
# make sure works on mixed-type frame
getattr(float_string_frame, opname)(axis=0)
getattr(float_string_frame, opname)(axis=1)
if has_numeric_only:
getattr(float_string_frame, opname)(axis=0, numeric_only=True)
getattr(float_string_frame, opname)(axis=1, numeric_only=True)
getattr(float_frame, opname)(axis=0, numeric_only=False)
getattr(float_frame, opname)(axis=1, numeric_only=False)
def assert_bool_op_calc(opname, alternative, frame, has_skipna=True):
"""
Check that bool operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
alternative : function
Function that opname is tested against; i.e. "frame.opname()" should
equal "alternative(frame)".
frame : DataFrame
The object that the tests are executed on
has_skipna : bool, default True
Whether the method "opname" has the kwarg "skip_na"
"""
f = getattr(frame, opname)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# bad axis
with pytest.raises(ValueError, match='No axis named 2'):
f(axis=2)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, opname)(axis=0)
r1 = getattr(all_na, opname)(axis=1)
if opname == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
def assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=False):
"""
Check that API for boolean operator opname works as advertised on frame
Parameters
----------
opname : string
Name of the operator to test on frame
    bool_frame_with_na : DataFrame
        DataFrame with boolean columns and NA values
float_string_frame : DataFrame
DataFrame with both float and string columns
has_bool_only : bool, default False
Whether the method "opname" has the kwarg "bool_only"
"""
# make sure op works on mixed-type frame
mixed = float_string_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0.5
getattr(mixed, opname)(axis=0)
getattr(mixed, opname)(axis=1)
if has_bool_only:
getattr(mixed, opname)(axis=0, bool_only=True)
getattr(mixed, opname)(axis=1, bool_only=True)
getattr(bool_frame_with_na, opname)(axis=0, bool_only=False)
getattr(bool_frame_with_na, opname)(axis=1, bool_only=False)
class TestDataFrameAnalytics(object):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self, float_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
self._check_method(float_frame, 'spearman')
def _check_method(self, frame, method='pearson'):
correls = frame.corr(method=method)
expected = frame['A'].corr(frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self, float_frame, float_string_frame):
float_frame['A'][:5] = np.nan
float_frame['B'][5:10] = np.nan
# exclude non-numeric types
result = float_string_frame.corr()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'kendall', 'spearman'])
def test_corr_nooverlap(self, meth):
# nothing in common
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
@pytest.mark.parametrize('meth', ['pearson', 'spearman'])
def test_corr_constant(self, meth):
# constant --> all NA
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when dtypes of pandas series are different
# then ndarray will have dtype=object,
# so it need to be properly handled
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore", RuntimeWarning)
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_corr_invalid_method(self):
# GH 22298
df = pd.DataFrame(np.random.normal(size=(10, 2)))
msg = ("method must be either 'pearson', "
"'spearman', 'kendall', or a callable, ")
with pytest.raises(ValueError, match=msg):
df.corr(method="____")
def test_cov(self, float_frame, float_string_frame):
# min_periods no NAs (corner case)
expected = float_frame.cov()
result = float_frame.cov(min_periods=len(float_frame))
tm.assert_frame_equal(expected, result)
result = float_frame.cov(min_periods=len(float_frame) + 1)
assert isna(result.values).all()
# with NAs
frame = float_frame.copy()
frame['A'][:5] = np.nan
frame['B'][5:10] = np.nan
result = float_frame.cov(min_periods=len(float_frame) - 8)
expected = float_frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
# regular
float_frame['A'][:5] = np.nan
float_frame['B'][:10] = np.nan
cov = float_frame.cov()
tm.assert_almost_equal(cov['A']['C'],
float_frame['A'].cov(float_frame['C']))
# exclude non-numeric types
result = float_string_frame.cov()
expected = float_string_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self, datetime_frame):
a = datetime_frame
noise = Series(np.random.randn(len(a)), index=a.index)
b = datetime_frame.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(np.random.randn(5, 4), index=index, columns=columns)
df2 = DataFrame(np.random.randn(4, 4),
index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self, datetime_frame):
result = datetime_frame.corrwith(datetime_frame['A'])
expected = datetime_frame.apply(datetime_frame['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_corrwith_index_intersection(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=True).index.sort_values()
expected = df1.columns.intersection(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_index_union(self):
df1 = pd.DataFrame(np.random.random(size=(10, 2)),
columns=["a", "b"])
df2 = pd.DataFrame(np.random.random(size=(10, 3)),
columns=["a", "b", "c"])
result = df1.corrwith(df2, drop=False).index.sort_values()
expected = df1.columns.union(df2.columns).sort_values()
tm.assert_index_equal(result, expected)
def test_corrwith_dup_cols(self):
# GH 21925
df1 = pd.DataFrame(np.vstack([np.arange(10)] * 3).T)
df2 = df1.copy()
df2 = pd.concat((df2, df2[0]), axis=1)
result = df1.corrwith(df2)
expected = pd.Series(np.ones(4), index=[0, 0, 1, 2])
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_spearman(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="spearman")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
@td.skip_if_no_scipy
def test_corrwith_kendall(self):
# GH 21925
df = pd.DataFrame(np.random.random(size=(100, 3)))
result = df.corrwith(df**2, method="kendall")
expected = Series(np.ones(len(result)))
tm.assert_series_equal(result, expected)
# ---------------------------------------------------------------------
# Describe
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
result = df3.describe()
tm.assert_numpy_array_equal(result["cat"].values, result["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
result = df.describe()
tm.assert_frame_equal(result, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(result) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
result = df.describe(include='all')
tm.assert_frame_equal(result, expected)
# ---------------------------------------------------------------------
# Reductions
def test_stat_op_api(self, float_frame, float_string_frame):
assert_stat_op_api('count', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('sum', float_frame, float_string_frame,
has_numeric_only=True)
assert_stat_op_api('nunique', float_frame, float_string_frame)
assert_stat_op_api('mean', float_frame, float_string_frame)
assert_stat_op_api('product', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
assert_stat_op_api('min', float_frame, float_string_frame)
assert_stat_op_api('max', float_frame, float_string_frame)
assert_stat_op_api('mad', float_frame, float_string_frame)
assert_stat_op_api('var', float_frame, float_string_frame)
assert_stat_op_api('std', float_frame, float_string_frame)
assert_stat_op_api('sem', float_frame, float_string_frame)
assert_stat_op_api('median', float_frame, float_string_frame)
try:
from scipy.stats import skew, kurtosis # noqa:F401
assert_stat_op_api('skew', float_frame, float_string_frame)
assert_stat_op_api('kurt', float_frame, float_string_frame)
except ImportError:
pass
def test_stat_op_calc(self, float_frame_with_na, mixed_float_frame):
def count(s):
return notna(s).sum()
def nunique(s):
return len(algorithms.unique1d(s.dropna()))
def mad(x):
return np.abs(x - x.mean()).mean()
def var(x):
return np.var(x, ddof=1)
def std(x):
return np.std(x, ddof=1)
def sem(x):
return np.std(x, ddof=1) / np.sqrt(len(x))
def skewness(x):
from scipy.stats import skew # noqa:F811
if len(x) < 3:
return np.nan
return skew(x, bias=False)
def kurt(x):
from scipy.stats import kurtosis # noqa:F811
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
assert_stat_op_calc('nunique', nunique, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
# mixed types (with upcasting happening)
assert_stat_op_calc('sum', np.sum, mixed_float_frame.astype('float32'),
check_dtype=False, check_less_precise=True)
assert_stat_op_calc('sum', np.sum, float_frame_with_na,
skipna_alternative=np.nansum)
assert_stat_op_calc('mean', np.mean, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('product', np.prod, float_frame_with_na)
assert_stat_op_calc('mad', mad, float_frame_with_na)
assert_stat_op_calc('var', var, float_frame_with_na)
assert_stat_op_calc('std', std, float_frame_with_na)
assert_stat_op_calc('sem', sem, float_frame_with_na)
assert_stat_op_calc('count', count, float_frame_with_na,
has_skipna=False, check_dtype=False,
check_dates=True)
try:
            from scipy.stats import skew, kurtosis  # noqa:F401
assert_stat_op_calc('skew', skewness, float_frame_with_na)
assert_stat_op_calc('kurt', kurt, float_frame_with_na)
except ImportError:
pass
# TODO: Ensure warning isn't emitted in the first place
@pytest.mark.filterwarnings("ignore:All-NaN:RuntimeWarning")
def test_median(self, float_frame_with_na, int_frame):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
assert_stat_op_calc('median', wrapper, float_frame_with_na,
check_dates=True)
assert_stat_op_calc('median', wrapper, int_frame, check_dtype=False,
check_dates=True)
@pytest.mark.parametrize('method', ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH#676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'], dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('op', ['mean', 'std', 'var',
'skew', 'kurt', 'sem'])
def test_mixed_ops(self, op):
# GH#16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_nunique(self):
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
@pytest.mark.parametrize('tz', [None, 'UTC'])
def test_mean_mixed_datetime_numeric(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
df = pd.DataFrame({"A": [1, 1],
"B": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series([1.0], index=['A'])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'UTC'])
    def test_mean_excludes_datetimes(self, tz):
# https://github.com/pandas-dev/pandas/issues/24752
# Our long-term desired behavior is unclear, but the behavior in
# 0.24.0rc1 was buggy.
df = pd.DataFrame({"A": [pd.Timestamp('2000', tz=tz)] * 2})
result = df.mean()
expected = pd.Series()
tm.assert_series_equal(result, expected)
def test_var_std(self, datetime_frame):
result = datetime_frame.std(ddof=4)
expected = datetime_frame.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = datetime_frame.var(ddof=4)
expected = datetime_frame.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH 9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
msg = r"unsupported operand type\(s\) for -: 'float' and 'str'"
with pytest.raises(TypeError, match=msg):
getattr(df1, meth)(axis=1, numeric_only=False)
msg = "could not convert string to float: 'a'"
with pytest.raises(TypeError, match=msg):
getattr(df2, meth)(axis=1, numeric_only=False)
def test_sem(self, datetime_frame):
result = datetime_frame.sem(ddof=4)
expected = datetime_frame.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_kurt(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
result = mixed.min()
expected = Series([pd.Timedelta(timedelta(seconds=5 * 60 + 5)),
pd.Timedelta(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
empty_frame = DataFrame()
axis0 = empty_frame.sum(0)
axis1 = empty_frame.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
        result = getattr(df, method)()
        expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
        tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
def test_sum_object(self, float_frame):
values = float_frame.values.astype(int)
frame = DataFrame(values, index=float_frame.index,
columns=float_frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self, float_frame):
# ensure this works, bug report
bools = np.isnan(float_frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self, float_frame, float_string_frame):
        # unit test when we have object data
the_mean = float_string_frame.mean(axis=0)
the_sum = float_string_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(float_string_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = float_string_frame.mean(axis=1)
the_sum = float_string_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
float_frame['bool'] = float_frame['A'] > 0
means = float_frame.mean(0)
assert means['bool'] == float_frame['bool'].values.mean()
def test_stats_mixed_type(self, float_string_frame):
# don't blow up
float_string_frame.std(1)
float_string_frame.var(1)
float_string_frame.mean(1)
float_string_frame.skew(1)
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# ---------------------------------------------------------------------
# Cumulative Reductions - cumsum, cummax, ...
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_cumsum(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumsum = datetime_frame.cumsum()
expected = datetime_frame.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = datetime_frame.cumsum(axis=1)
expected = datetime_frame.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = datetime_frame.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(datetime_frame)
def test_cumprod(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cumprod = datetime_frame.cumprod()
expected = datetime_frame.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = datetime_frame.cumprod(axis=1)
expected = datetime_frame.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = datetime_frame.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(datetime_frame)
# ints
df = datetime_frame.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = datetime_frame.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_cummin(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummin = datetime_frame.cummin()
expected = datetime_frame.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = datetime_frame.cummin(axis=1)
expected = datetime_frame.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = datetime_frame.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(datetime_frame)
def test_cummax(self, datetime_frame):
datetime_frame.loc[5:10, 0] = np.nan
datetime_frame.loc[10:15, 1] = np.nan
datetime_frame.loc[15:, 2] = np.nan
# axis = 0
cummax = datetime_frame.cummax()
expected = datetime_frame.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = datetime_frame.cummax(axis=1)
expected = datetime_frame.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = datetime_frame.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(datetime_frame)
# ---------------------------------------------------------------------
# Miscellanea
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_pct_change(self):
# GH#11150
pnl = DataFrame([np.arange(0, 40, 10),
np.arange(0, 40, 10),
np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Index of max / min
def test_idxmin(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmin(axis=2)
def test_idxmax(self, float_frame, int_frame):
frame = float_frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, int_frame]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
msg = ("No axis named 2 for object type"
" <class 'pandas.core.frame.DataFrame'>")
with pytest.raises(ValueError, match=msg):
frame.idxmax(axis=2)
# ----------------------------------------------------------------------
# Logical reductions
@pytest.mark.parametrize('opname', ['any', 'all'])
def test_any_all(self, opname, bool_frame_with_na, float_string_frame):
assert_bool_op_calc(opname, getattr(np, opname), bool_frame_with_na,
has_skipna=True)
assert_bool_op_api(opname, bool_frame_with_na, float_string_frame,
has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = | Series([True, True, False], index=['a', 'b', 'c']) | pandas.Series |
import requests
import re
import numpy as np
import pandas as pd
import shutil
'''
Author: <NAME>
Purpose: To extract the Biological Information and images for Pokemon from Bulbapedia.
This is done in four parts. The first part retrieves the bio and CDN directory links.
The second part of the script downloads and stores the Pokemon's image.
The third part creates a vector of booleans for each Pokemon, indicating which of
the 20 selected moves are learnt by that Pokemon.
The final part combines all this data into one comprehensive file.
'''
# Part 1 - biology and imageurl extraction
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# Lists to store the biological information and bulbapedia image URL for each Pokemon
bio = []
imageurls = []
for i in range(802):
# Handling special cases of Pokemon names with different URL structure
if pokemon_names[i] == 'Nidoran-M':
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format('Nidoran%E2%99%82')
elif pokemon_names[i] == 'Nidoran-F':
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format('Nidoran%E2%99%80')
else:
URL = "https://bulbapedia.bulbagarden.net/wiki/{}_(Pok%C3%A9mon)".format(pokemon_names[i])
# Getting HTML data from bulbapedia page
r = requests.get(URL)
# Searching for html tags with CDN directory
imgloc = re.search(r'<img alt="(.*?) src="(.*?)" width="250"', r.text).group(2)
# Getting CDN sub-directory with Pokemon's image
details = re.search(r'thumb/(.*?).png', imgloc).group(1)
imageurls.append(details)
# Getting the text from the Biology section on Bulbapedia
content = re.search(
'<h2><span class="mw-headline" id="Biology">Biology</span></h2>(.*?)<h2><span class="mw-headline" id="In_the_anime">In the anime</span></h2>',
r.text,
re.DOTALL
).group(1)
# Removing HTML tags and cleaning text
content = re.sub(r'&#.{4};', '', content)
content = re.sub(r'<a href=(.*?)>', '', content)
content = re.sub(r'<(/)?(p|sup|a|b|span|I)>', '', content)
content = re.sub(r'\(Japanese:(.*?)\)', '', content)
content = re.sub(r'<(span) class(.*?)>', '', content)
content = re.sub(r'<img (.*)/>', '', content)
content = re.sub(r'<sup id(.*?)>', '', content)
content = re.sub(r'<div class(.*)>(.*)</div>', '', content)
content = re.sub(r'<br(.*?)/>', '', content)
content = re.sub(r'<(.*)>(.*?)</(.*?)>', '', content)
content = re.sub(r' \.', '.', content)
# Adding Pokemon's bio to the list and notifying user of success
bio.append(content)
print("Completed text retrieval for {}".format(pokemon_names[i]))
# Storing the biological information on a CSV file
bio_data = pd.DataFrame(bio)
bio_data.to_csv('D:/UIP/scraping/pokemonbio.csv')
# Storing image urls on a CSV file for image retrieval in part 2
url_data = pd.DataFrame(imageurls)
url_data.to_csv('D:/UIP/scraping/pokemonimgurls.csv')
# Part 2 - image extraction
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# Get Pokemon URLs with CDN directory
dfI = pd.read_csv('D:/UIP/scraping/pokemonimgurls.csv')
pokemon_images = dfI['0']
for i in range(802):
# Define URL depending on Pokemon name and CDN folder structure
URL = 'https://cdn.bulbagarden.net/upload/{}.png'.format(pokemon_images[i])
# Stream image content from URL
resp = requests.get(URL, stream=True)
# Create a local file to store image contents
pname = '{}.jpg'.format(pokemon_names[i])
local_image = open(pname, 'wb')
# Decoding image content
resp.raw.decode_content = True
# Storing the stream data on local image file
shutil.copyfileobj(resp.raw, local_image)
# Remove the image url response object.
del resp
# Prints success message
print('Image retrieved for {}'.format(pname))
# Part 3 - Getting data for moves learnt by Pokemon
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
# List of moves to query for
# move_list = ['Bounce', 'Flamethrower', 'Ice_Beam', 'Thunderbolt', 'Sludge_Bomb', 'Iron_Head', 'Brick_Break', 'Dragon_Pulse', 'Absorb',
# 'Wing_Attack', 'Bite', 'Dazzling_Gleam', 'Confusion', 'Rock_Blast', 'Hypnosis', 'High_Jump_Kick', "Dark_Pulse", 'Mud_Shot', 'Scald', 'Bug_Bite']
move_list = ['Frost_Breath', 'Flame_Charge', 'Bug_Bite', 'Discharge', 'Metal_Claw',
             'Psyshock', 'Draco_Meteor', 'Stealth_Rock', 'Magnitude', 'Foul_Play',
             'Rock_Throw', 'Hex', 'Shadow_Sneak', 'Scald', 'Synthesis', 'Dazzling_Gleam',
             'Wing_Attack', 'Close_Combat', 'High_Jump_Kick', 'Aurora_Veil', 'Shift_Gear']
# Array to store boolean values
move_data = np.zeros((len(pokemon_names), len(move_list)))
for j in range(len(move_list)):
# Get Bulbapedia URL of that move
URL = 'https://bulbapedia.bulbagarden.net/wiki/{}_(move)'.format(move_list[j])
r = requests.get(URL)
# Get a list of all Pokemon that learn that move
imgloc = re.findall(
r'<td style="text-align:center;" width="26px"> <a href="/wiki/(.*?)_', r.text)
# Encode the corresponding column in the move_data array as 0 or 1
for i in range(802):
if pokemon_names[i] in imgloc:
move_data[i, j] = 1
# Prints success message
print('Done for {}'.format(move_list[j]))
# Converts array to dataframe and stores as csv for future use
df = pd.DataFrame(move_data, columns=move_list)
df.to_csv('D:/UIP/scraping/pokemonmoves.csv')
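# Toy illustration of the encoding built above: rows are Pokemon, columns are
# moves, and a cell is 1 when that Pokemon appears in the move's learner list
# (the learner lists below are made up for the example).
example_names = ['Pikachu', 'Charmander']
example_learners = {'Flame_Charge': ['Charmander'], 'Discharge': ['Pikachu']}
example_matrix = np.zeros((len(example_names), len(example_learners)))
for col, (move, learners) in enumerate(example_learners.items()):
    for row, name in enumerate(example_names):
        if name in learners:
            example_matrix[row, col] = 1
print(pd.DataFrame(example_matrix, index=example_names,
                   columns=list(example_learners)))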
# Part 4 - Creating the complete dataset
# Get list of Pokemon Names
df = pd.read_csv('D:/UIP/scraping/pokemonstats.csv', header=0)
pokemon_names = df['Name']
pokemon_type = df['Type1']
pokemon_typeB = df['Type2']
# Get data on biology and moves learnt
dfB = | pd.read_csv('D:/UIP/scraping/pokemonbio.csv', index_col=0) | pandas.read_csv |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Test the projection module
"""
import unittest
import numpy as np
import pandas as pd
from numpy.polynomial import legendre
from pyrotor.projection import trajectory_to_coef
from pyrotor.projection import trajectories_to_coefs
from pyrotor.projection import compute_weighted_coef
from pyrotor.projection import coef_to_trajectory
def test_trajectory_to_coef():
# Test Legendre
y = pd.DataFrame({"A": [1, 2, 3, 4, 5],
"B": [-4, -1, 4, 11, 20]})
basis_dimension = {"A": 3, "B": 2}
basis_features = basis_dimension
basis = "legendre"
expected_coef = np.array([3., 2., 0., 6., 12.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
# Test B-spline
x = np.linspace(0, 1, 20)
y = pd.DataFrame({"A": x,
"B": x**2})
basis_features = {"knots": [.25, .5, .75], "A": 2, "B": 3}
basis_dimension = {"A": 6, "B": 7}
basis = "bspline"
expected_coef = np.array([0., .125, .375, .625, .875, 1.,
0., 0., 4.16666667e-02, 2.29166667e-01, 5.41666667e-01, 8.33333333e-01, 1.], dtype='float64')
result = trajectory_to_coef(y, basis, basis_features, basis_dimension)
np.testing.assert_almost_equal(expected_coef, result)
def test_trajectories_to_coefs():
# Test Legendre
y = [pd.DataFrame({"A": [1, 2, 3, 4, 5]}),
pd.DataFrame({"A": [-4, -1, 4, 11, 20]})]
basis_dimension = {"A": 2}
basis_features = basis_dimension
basis = "legendre"
expected_coefs_traj_1 = np.array([3., 2.])
expected_coefs_traj_2 = np.array([6., 12.])
n_jobs = None
result = trajectories_to_coefs(y, basis, basis_features, basis_dimension, n_jobs)
result_1 = result[0]
result_2 = result[1]
np.testing.assert_almost_equal(result_1, expected_coefs_traj_1)
np.testing.assert_almost_equal(result_2, expected_coefs_traj_2)
# Test B-spline
x = np.linspace(0, 1, 20)
y = [ | pd.DataFrame({"A": x}) | pandas.DataFrame |
__author__ = '<NAME>'
__email__ = '<EMAIL>'
########################################
# imports
########################################
import pandas as pd
########################################
# Meta-Feature Ranker
########################################
class MetaFeatureRanker:
def __init__(self, meta_feature_scores_dict: dict):
# Create a DataFrame from dict
self._meta_feature_scores_df = | pd.DataFrame.from_dict(meta_feature_scores_dict, orient='index') | pandas.DataFrame.from_dict |
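# Usage sketch with made-up scores: it assumes a dict-of-dicts shape, so each
# key becomes an index label and the nested keys become columns of the
# internal DataFrame.
if __name__ == '__main__':
    example_scores = {'feature_a': {'score': 0.9}, 'feature_b': {'score': 0.4}}
    ranker = MetaFeatureRanker(example_scores)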
import pandas as pd
import math
# Loads Recipes, defines conversion calculations
def RecipeLoad():
global Recipes_Tools
global Recipes_Ingredients
global Recipes_Details
global Conversions
Recipes_Tools = pd.read_excel(r'C:\Users\franc\Documents\SousChef\Recipes.xlsx', sheet_name='Recipes (Tools)')
Recipes_Ingredients = pd.read_excel(r'C:\Users\franc\Documents\SousChef\Recipes.xlsx', sheet_name='Recipes (Ingredients)')
Recipes_Details = pd.read_excel(r'C:\Users\franc\Documents\SousChef\Recipes.xlsx', sheet_name='Recipes (Details)')
Conversions = pd.read_excel(r'C:\Users\franc\Documents\SousChef\Recipes.xlsx', sheet_name='Conversion Table')
# Merges all Recipe Units with Conversion Table to provide Metric conversion figures for single quantities
x = Recipes_Ingredients.merge(Conversions, how="left", left_on='Units', right_on='Units')
# Uses conversion figures to create measurements for items with quantity > 1
x["Liters"] = x["Liters"].multiply(x["Quantity"])
x["Grams"] = x["Grams"].multiply(x["Quantity"])
# Create variable "Generic Type" to track what Units are being used for each item
x["Generic Type"] = x["Units"]
x.loc[~ pd.isna(x["Grams"]), "Generic Type"] = "Grams"
x.loc[~ pd.isna(x["Liters"]), "Generic Type"] = "Liters"
x["Items"] = 0
# This is ugly, but it assigns Quantity of Items to the
# Measurement Value when the item is not measured in Liters or Grams
x.loc[pd.isna(x["Grams"]) & pd.isna(x["Liters"]), "Items"] = x.loc[ | pd.isna(x["Grams"]) | pandas.isna |
import random
import json
import math
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pandas as pd
import seaborn as sns
import missingno as msno
from sklearn import datasets
from sklearn import model_selection
from sklearn import preprocessing
from sklearn import metrics
from sklearn import ensemble
from sklearn import svm
def _plot_one_against_original_data(df_original: pd.DataFrame, df_resampled: pd.DataFrame):
df_resampled_renamed = df_resampled.rename(columns={'power': 'power_resampled'}, inplace=False)
ax = df_original.plot()
df_resampled_renamed.plot(ax=ax)
# plt.show()
def resample_comparison():
dates = pd.date_range('20191204', periods=30, freq='5s')
df = pd.DataFrame(
{'power': np.random.randint(low=0, high=50, size=len(dates))},
index=dates
)
df_resampled_mean = df.resample('6s').mean()
df_resampled_nearest = df.resample('6s').nearest()
df_resampled_ffill = df.resample('6s').ffill()
df_resampled_bfill = df.resample('6s').bfill()
df_plt, = plt.plot(df)
mean_plt, = plt.plot(df_resampled_mean)
nearest_plt, = plt.plot(df_resampled_nearest)
bfill_plt, = plt.plot(df_resampled_bfill)
ffill_plt, = plt.plot(df_resampled_ffill)
plt.legend(
[df_plt, mean_plt, nearest_plt, bfill_plt, ffill_plt],
['df', 'mean', 'nearest', 'bfill', 'ffill']
)
plt.show()
_plot_one_against_original_data(df, df_resampled_nearest)
_plot_one_against_original_data(df, df_resampled_mean)
_plot_one_against_original_data(df, df_resampled_bfill)
_plot_one_against_original_data(df, df_resampled_ffill)
plt.show()
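# Tiny worked example of the four strategies compared above, on three samples
# spaced 5 s apart and resampled onto a 6 s grid.
def example_resample_values():
    idx = pd.date_range('2019-12-04', periods=3, freq='5s')
    s = pd.Series([0.0, 10.0, 20.0], index=idx)
    print(s.resample('6s').mean())     # averages the samples falling in each 6 s bin
    print(s.resample('6s').nearest())  # takes the closest original sample
    print(s.resample('6s').ffill())    # carries the previous sample forward
    print(s.resample('6s').bfill())    # takes the next available sample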
def read_temp_sn(name: str) -> int:
with open('data/additional_info.json') as f:
additional_data = json.load(f)
devices = additional_data['offices']['office_1']['devices']
sn_temp_mid = [d['serialNumber'] for d in devices if d['description'] == name][0]
return sn_temp_mid
def project_check_data():
# sn_temp_mid = read_temp_sn('temperature_middle')
sn_temp_mid = read_temp_sn('radiator_1')
df_temp = pd.read_csv('data/office_1_temperature_supply_points_data_2020-10-13_2020-11-02.csv')
print(df_temp.info())
print(df_temp.describe())
print(df_temp.head(5))
df_temp.rename(columns={'Unnamed: 0': 'time'}, inplace=True)
df_temp.rename(columns={'value': 'temp'}, inplace=True)
df_temp['time'] = pd.to_datetime(df_temp['time'])
df_temp.drop(columns=['unit'], inplace=True)
print(df_temp.info())
df_temp = df_temp[df_temp['serialNumber'] == sn_temp_mid]
print(df_temp.info())
print(df_temp.head(5))
df_temp.set_index('time', inplace=True)
df_temp: pd.DataFrame
# df_temp.plot(kind='scatter')
plt.scatter(df_temp.index, df_temp.temp)
plt.show()
df_target_temp = pd.read_csv('data/office_1_targetTemperature_supply_points_data_2020-10-13_2020-11-01.csv')
df_target_temp.rename(columns={'Unnamed: 0': 'time'}, inplace=True)
df_target_temp.rename(columns={'value': 'target_temp'}, inplace=True)
df_target_temp['time'] = pd.to_datetime(df_target_temp['time'])
df_target_temp.drop(columns=['unit'], inplace=True)
df_target_temp.set_index('time', inplace=True)
df_valve = pd.read_csv('data/office_1_valveLevel_supply_points_data_2020-10-13_2020-11-01.csv')
df_valve.rename(columns={'Unnamed: 0': 'time'}, inplace=True)
df_valve.rename(columns={'value': 'valve'}, inplace=True)
df_valve['time'] = pd.to_datetime(df_valve['time'])
df_valve.drop(columns=['unit'], inplace=True)
df_valve.set_index('time', inplace=True)
df_combined = pd.concat([df_temp, df_target_temp, df_valve])
df_combined = df_combined.resample(pd.Timedelta(minutes=15)).mean().fillna(method='ffill')
print(df_combined.head())
df_combined['temp_last'] = df_combined['temp'].shift(1, fill_value=20)
df_combined['temp_gt'] = df_combined['temp'].shift(-1, fill_value=20.34)
mask = (df_combined.index < '2020-10-27')
df_train = df_combined.loc[mask]
# X_train = df_train.drop(columns=['target_temp', 'temp_last', 'temp_gt']).to_numpy()[1:-1]
X_train = df_train[['temp', 'valve']].to_numpy()[1:-1]
print(X_train[:5])
y_train = df_train['temp_gt'].to_numpy()[1:-1]
reg_rf = ensemble.RandomForestRegressor(random_state=42)
reg_rf.fit(X_train, y_train)
pickle.dump(reg_rf, open('reg_rf.p', 'wb'))
mask = (df_combined.index > '2020-10-27') & (df_combined.index <= '2020-10-28')
df_test = df_combined.loc[mask]
X_test = df_test[['temp', 'valve']].to_numpy()
y_predicted = reg_rf.predict(X_test)
df_test['temp_predicted'] = y_predicted.tolist()
y_test = df_test['temp_gt'].to_numpy()[1:-1]
y_last = df_test['temp_last'].to_numpy()[1:-1]
print(f'mae base: {metrics.mean_absolute_error(y_test, y_last)}')
print(f'mae rf: {metrics.mean_absolute_error(y_test, y_predicted[1:-1])}')
print(f'mse base: {metrics.mean_squared_error(y_test, y_last)}')
print(f'mse rf: {metrics.mean_squared_error(y_test, y_predicted[1:-1])}')
print(df_combined.head(5))
print(df_combined.tail(5))
df_test.drop(columns=['valve', 'temp', 'target_temp'], inplace=True)
df_test.plot()
plt.show()
# df_temp.plot()
# df_target_temp.plot()
# plt.plot(df_temp.index, df_temp.temp)
# plt.plot(df_target_temp.index, df_target_temp.target_temp)
# plt.show()
def do_magic(
temperature: pd.DataFrame,
target_temperature: pd.DataFrame,
valve_level: pd.DataFrame,
serial_number_for_prediction: str
) -> float:
# print(temperature.head(5))
# print(temperature.tail(5))
return 20
def preprocess_time_to_index(d: pd.DataFrame) -> pd.DataFrame:
processed_d = d.rename(columns={'Unnamed: 0': 'time'})
processed_d['time'] = pd.to_datetime(processed_d['time'])
# coding: utf-8
# <NAME> <EMAIL>
# # Simulate exoplanet yield from TESS
# The purpose of this code is to simulate the exoplanet yield from the TESS Mission.
# We do this by taking the various fields that TESS observes and, using a galaxy model,
# putting planets around the stars and seeing whether we can detect those planets.
from __future__ import division, print_function
import numpy as np
import pandas as pd
import astroquery
import matplotlib.pyplot as plt
import glob
import matplotlib
matplotlib.style.use('ggplot')
import occSimFuncs as occFunc
from numpy.random import poisson, beta, uniform
#get_ipython().magic(u'matplotlib inline')
#constants
msun = 1.9891E30
rsun = 695500000.
G = 6.67384E-11
AU = 149597870700.
# let's read our galaxy model files
def add_stellar_props(df):
df['isMdwarf'] = pd.Series((df.CL == 5) & (df.Typ >= 7.), name='isMdwarf' )
df['isGiant'] = pd.Series((df.CL < 5), name='isGiant' )
df['I'] = pd.Series(-1. * (df.VI - df.V), name='I')
df['Teff'] = pd.Series(10**df.LTef , name='Teff')
g = 10**df.logg * 0.01
df['Radius'] = pd.Series(np.sqrt(G*df.Mass*msun / g) / rsun, name='Radius')
return df
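# The Radius column above follows from surface gravity: g = G*M/R^2, so R = sqrt(G*M/g),
# with logg given in cgs (the 0.01 factor converts cm/s^2 to m/s^2).
# Quick sanity check (illustrative, not part of the original notebook):
def _radius_sanity_check():
    logg_sun = 4.438                 # solar surface gravity, log10(cm/s^2)
    g = 10**logg_sun * 0.01          # m/s^2
    r = np.sqrt(G * 1.0 * msun / g) / rsun
    print(r)                         # should come out very close to 1 solar radius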
def make_fullfov_df(df,consts):
multiple = consts['galmodarea'] / consts['simsize']
numstars = int(df.shape[0] * multiple)
rows = np.random.choice(df.index.values, size=numstars)
newq = df.loc[rows]  # select the sampled index labels (DataFrame.ix was removed from pandas)
return newq.set_index(np.arange(newq.shape[0]))
def make_allplanets_df(df,starid_zp):
df['planetRadius'] = pd.Series()
df['planetPeriod'] = pd.Series()
df['starID'] = pd.Series()
newdf = pd.DataFrame(columns=df.columns)
starID = starid_zp
for thisRow in np.arange(df.shape[0]):
if df.loc[thisRow,'isMdwarf']:
radper = occFunc.Dressing15_select(df.loc[thisRow, 'Nplanets'])
if df.loc[thisRow,'Nplanets'] == 0:
continue
elif df.loc[thisRow,'Nplanets'] == 1:
df.loc[thisRow,'planetRadius'] = radper[0]
df.loc[thisRow,'planetPeriod'] = radper[1]
df.loc[thisRow,'starID'] = starID
newdf = newdf.append(df.loc[thisRow])
starID +=1
elif df.loc[thisRow,'Nplanets'] >= 2:
df.loc[thisRow,'starID'] = starID
for p in np.arange(df.loc[thisRow,'Nplanets']):
df.loc[thisRow,'planetRadius'] = radper[0][p]
df.loc[thisRow,'planetPeriod'] = radper[1][p]
newdf = newdf.append(df.loc[thisRow])
starID +=1
elif not df.loc[thisRow,'isMdwarf']:
radper = occFunc.Fressin13_select(df.loc[thisRow, 'Nplanets'])
if df.loc[thisRow,'Nplanets'] == 0:
continue
elif df.loc[thisRow,'Nplanets'] == 1:
df.loc[thisRow,'planetRadius'] = radper[0]
df.loc[thisRow,'planetPeriod'] = radper[1]
df.loc[thisRow,'starID'] = starID
newdf = newdf.append(df.loc[thisRow])
starID +=1
elif df.loc[thisRow,'Nplanets'] >= 2:
df.loc[thisRow,'starID'] = starID
for p in np.arange(df.loc[thisRow,'Nplanets']):
df.loc[thisRow,'planetRadius'] = radper[0][p]
df.loc[thisRow,'planetPeriod'] = radper[1][p]
newdf = newdf.append(df.loc[thisRow])
starID += 1
newdf.set_index(np.arange(newdf.shape[0]), inplace=True)
return newdf, starID
def make_allplanets_df_vec(df,starid_zp):
# lets refector the above code to make it array operations
totalRows = df['Nplanets'].sum()
df['planetRadius'] = pd.Series()
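# The rest of this vectorised rewrite is missing from the source. A plausible continuation,
# sketched here only as comments (variable names are hypothetical), would repeat each star row
# by its planet count instead of looping row by row:
#   newdf = df.loc[np.repeat(df.index.values, df['Nplanets'].values)].copy()
#   # ...assign starID per star, draw planetRadius / planetPeriod for all rows at once from the
#   # Dressing15 / Fressin13 occurrence-rate samplers used above, and return the new frame plus
#   # the updated starID counter, mirroring make_allplanets_df.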
from genericpath import exists
import warnings
warnings.filterwarnings("ignore")
from yahoo_fin import stock_info as si
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm, gmean, cauchy
import seaborn as sns
from datetime import date, datetime, timedelta
import yfinance as yf
from yahooquery import Ticker
import streamlit as st
from finvizfinance.quote import finvizfinance
from pathlib import Path
from src.tools.functions import company_longName
class MonteCarloCholesky(object):
def __init__(self, date_report):
self.report_date = date_report
self.saveAdvisor = Path(f"data/advisor/{str(self.report_date)[:7]}/{self.report_date}/")
if not self.saveAdvisor.exists():
self.saveAdvisor.mkdir(parents=True)
def import_stock_data(self, tickers, saveName="xxx", start="2010-1-1", end=datetime.today().strftime("%Y-%m-%d"),):
y = Ticker(
tickers,
asynchronous=True,
backoff_factor=0.34,
formatted=True,
progress=True,
validate=True,
verify=False,
).history(
start=start,
end=end,
interval='1d'
).reset_index().set_index('date')
df = pd.DataFrame()
for i in tickers:
z = pd.DataFrame(y[y['symbol'] == i]['adjclose'])
df[i] = z
return df.round(2)
def log_returns(self, data):
return np.log(1 + data.pct_change())
def simple_returns(self, data):
return (data / data.shift(1)) - 1
def drift_calc(self, data, return_type="log"):
if return_type == "log":
lr = self.log_returns(data)
elif return_type == "simple":
lr = self.simple_returns(data)
u = lr.mean()
var = lr.var()
drift = u - (0.5 * var)
try:
return drift.values
except:
return drift
def get_tickers(self, data):
tickers = [i for i in data.columns]
return tickers
def probs_find(self, predicted, higherthan, ticker=None, on="value"):
"""
This function calculates the probability of a stock being above a certain threshold, which can be defined as a value (final stock price) or return rate (percentage change)
Input:
1. predicted: dataframe with all the predicted prices (days and simulations)
2. higherthan: specified threshold for which to compute the probability (ex. 0 on return will compute the probability of at least breaking even)
3. on: 'return' or 'value', the return of the stock or the final value of stock for every simulation over the time specified
4. ticker: specific ticker to compute probability for
"""
if ticker == None:
if on == "return":
predicted0 = predicted.iloc[0, 0]
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [
(i * 100) / predicted0
for i in predList
if ((i - predicted0) * 100) / predicted0 >= higherthan
]
less = [
(i * 100) / predicted0
for i in predList
if ((i - predicted0) * 100) / predicted0 < higherthan
]
elif on == "value":
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [i for i in predList if i >= higherthan]
less = [i for i in predList if i < higherthan]
else:
st.write("'on' must be either value or return")
else:
if on == "return":
predicted = predicted[predicted["ticker"] == ticker]
predicted0 = predicted.iloc[0, 0]
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [
(i * 100) / predicted0
for i in predList
if ((i - predicted0) * 100) / predicted0 >= higherthan
]
less = [
(i * 100) / predicted0
for i in predList
if ((i - predicted0) * 100) / predicted0 < higherthan
]
elif on == "value":
predicted = predicted.iloc[-1]
predList = list(predicted)
over = [i for i in predList if i >= higherthan]
less = [i for i in predList if i < higherthan]
else:
st.write("'on' must be either value or return")
return len(over) / (len(over) + len(less))
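# Example (illustrative only): given a simulated price DataFrame `sims` whose first row holds
# today's price and whose last row holds the end-of-horizon prices, the probability of at least
# breaking even and of finishing above $150 would be:
#   p_breakeven = self.probs_find(sims, 0, on="return")
#   p_above_150 = self.probs_find(sims, 150, on="value")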
def montecarlo_cholesky(self,tickers,days,iterations,start="2010-1-1",show_hist=True,show_line=True,show_stats=True,):
# Get tickers and number of tickers involved in this portfolio
data = self.import_stock_data(tickers, "monteCarloCholesky", start=start)
ticks = self.get_tickers(data)
numstocks = len(ticks)
# Basic information and data
log_return = self.log_returns(data)
# Brownian motion component: drift
drift = self.drift_calc(data)
stdev = self.log_returns(data).std().values
# Cholesky decomposition
covari = log_return.cov()
chol = np.linalg.cholesky(covari)
# Generate uncorrelated random variables and use the Cholesky decomposition to correlate them
uncorr_x = norm.ppf(np.random.rand(numstocks, iterations * days))
corr_x = np.dot(chol, uncorr_x)
# Calculate daily return
corr_2 = np.zeros_like(corr_x)
for i in range(numstocks):
corr_2[i] = np.exp(drift[i] + corr_x[i])
simulatedDF = []
# For every stock, calculate the returns
for s in range(len(ticks)):
ret_reshape = corr_2[s]
ret_reshape = ret_reshape.reshape(days, iterations)
price_list = np.zeros_like(ret_reshape)
price_list[0] = data.iloc[-1, s]
# Calculate returns for the s stock
for t in range(1, days):
price_list[t] = price_list[t - 1] * ret_reshape[t]
# Create dataframe for this run, input the name of stock and load
y = pd.DataFrame(price_list)
y["ticker"] = tickers[s]
cols = y.columns.tolist()
cols = cols[-1:] + cols[:-1]
y = y[cols]
simulatedDF.append(y)
# plotting
if show_hist == True:
x = pd.DataFrame(price_list).iloc[-1]
fig, ax = plt.subplots(1, 2, figsize=(14, 4))
sns.distplot(x, ax=ax[0], axlabel="Stock Price")
sns.distplot(
x,
hist_kws={"cumulative": True},
kde_kws={"cumulative": True},
ax=ax[1],
)
plt.xlabel("Stock Price")
plt.show()
if show_line == True:
y = pd.DataFrame(price_list[:, 0:10]).plot(figsize=(15, 6))
if show_stats == True:
# Printing basic stats
df = yf.download(ticks[s], period="1d")
x = round(float(df["Adj Close"]), 2)
d = si.get_quote_table(ticks[s])
y = d.get('1y Target Est')
st.subheader(f"𝄖𝄗𝄘𝄙𝄚 {company_longName(ticks[s])} [{ticks[s]}]")
st.write(f"* Forcast Days: {days}")
st.write(f"* Current Value: $ {x}")
st.write(f"* Analyst Average 1y Est: ${y}")
st.write(f"* Expected Value: ${round(pd.DataFrame(price_list).iloc[-1].mean(),2)}")
st.write(f"* Return: {round(100*(pd.DataFrame(price_list).iloc[-1].mean()-price_list[0,1])/pd.DataFrame(price_list).iloc[-1].mean(),2)}%")
st.write(f"* Probability of Breakeven: {self.probs_find(pd.DataFrame(price_list), 0, on='return')}")
st.write(' '*25)
simulatedDF = pd.concat(simulatedDF)
return simulatedDF
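# Note on the Cholesky step above: if Sigma is the covariance matrix of the log returns and
# Sigma = L @ L.T (np.linalg.cholesky), then for uncorrelated standard normal draws z the
# vectors x = L @ z have covariance L @ I @ L.T = Sigma, so the simulated daily shocks inherit
# the historical cross-stock correlations. Minimal check (illustrative):
#   z = np.random.standard_normal((Sigma.shape[0], 100000))
#   np.cov(np.linalg.cholesky(Sigma) @ z)   # approximately equal to Sigma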
def market_data_combination(self, tickers, data, mark_ticker="^GSPC", start="2010-1-1"):
ticks = [col for col in data.columns]
if mark_ticker in ticks:
ann_return = np.exp(self.log_returns(data).mean() * 252).values - 1
else:
market_data = self.import_stock_data(mark_ticker, "mkt_data_combo_1", start)
market_rets = self.log_returns(market_data).dropna()
ann_return = np.exp(market_rets.mean() * 252)
data = data.merge(market_data, left_index=True, right_index=True)
data.columns = [tickers[0], tickers[1], tickers[2], "^GSPC"]
return data, ann_return
def beta_sharpe(self,tickers,data,mark_ticker="^GSPC",start="2010-1-1",riskfree=0.025,riskpremium=0.05,):
# Beta
dd, mark_ret = self.market_data_combination(tickers, data, mark_ticker, start)
# load data and annual returns
log_ret = self.log_returns(dd) # create the logarithmic returns of the data
covar = log_ret.cov() * 252 # Annualized covariance matrix
covar = pd.DataFrame(covar.iloc[:-1, -1])
# Get the part of the covariance matrix that is related between the stocks and the market
mrk_var = log_ret.iloc[:, -1].var() * 252 # Annualized variance of market
beta = covar / mrk_var # Now we have our betas!
# Add the standard deviation to the beta dataframe
stdev_ret = pd.DataFrame(((log_ret.std() * 250 ** 0.5)[:-1]), columns=["STD"])
beta = beta.merge(stdev_ret, left_index=True, right_index=True)
# Get tickers of all the stocks in the dataframe used
tickers = self.get_tickers(dd)
# Make dictionary for the annual return of each stock
mark_ret = {tickers[i]: mark_ret[i] for i in range(len(tickers))}
# CAPM
for i, row in beta.iterrows():
beta.at[i, "CAPM"] = riskfree + (
row[mark_ticker] * (mark_ret[mark_ticker] - riskfree)
)
# Sharpe
for i, row in beta.iterrows():
beta.at[i, "Sharpe"] = (row["CAPM"] - riskfree) / (row["STD"])
beta.rename(columns={"^GSPC": "Beta"}, inplace=True)
return beta
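# Formulas used in beta_sharpe above (annualised with 252 trading days):
#   beta_i   = Cov(r_i, r_mkt) / Var(r_mkt)
#   CAPM_i   = riskfree + beta_i * (r_mkt_annual - riskfree)
#   Sharpe_i = (CAPM_i - riskfree) / sigma_i
# The code annualises the standard deviation with 250**0.5 while the covariance uses 252; that
# small inconsistency is preserved from the source. Note also that market_data_combination is
# redefined just below with a different signature, so the later definition is the one Python keeps.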
def market_data_combination(self, data, mark_ticker="^GSPC", start="2010-1-1"):
market_data = self.import_stock_data(mark_ticker, "mkt_data_combo_2", start)
market_rets = self.log_returns(market_data).dropna()
ann_return = np.exp(market_rets.mean() * 252).values - 1
data = data.merge(market_data, left_index=True, right_index=True)
return data, ann_return
def montecarlo_sharpe_optimal_portfolio(self, tickers, trials=13000, end_date=None, start_date="2020-1-1", riskfree=0.025, plot_eff=True,):
if end_date == None:
end_date = self.report_date
# end_date = datetime.today().strftime("%Y-%m-%d")
data = self.import_stock_data(tickers, "monteCarloSharpe")
allWeights = np.zeros((trials, len(data.columns)))
allReturns = np.zeros(trials)
allVolatility = np.zeros(trials)
allSharpeValues = np.zeros(trials)
log_return = self.log_returns(data)
for t in range(trials):
weights = np.random.rand(len(data.columns))
weights = weights / np.sum(weights)
allWeights[t, :] = weights
allReturns[t] = np.sum((log_return.mean() * weights) * 252)
allVolatility[t] = np.sqrt(
np.dot(weights.T, np.dot(log_return.cov() * 252, weights))
)
allSharpeValues[t] = (allReturns[t] - riskfree) / allVolatility[t]
maxsharpe = allSharpeValues.max()
pointsharpe = allSharpeValues.argmax()
weightSharpe = allWeights[pointsharpe]
x_sharpe = allVolatility[pointsharpe]
y_sharpe = allReturns[pointsharpe]
maxret = allReturns.max()
pointret = allReturns.argmax()
weightRet = allWeights[pointret]
x_ret = allVolatility[pointret]
y_ret = allReturns[pointret]
if plot_eff == True:
st.caption('_'*25)
st.subheader(f"__𝄖𝄗𝄘𝄙𝄚 Graphic Simulation Of Portfolios__")
fig, ax = plt.subplots(figsize=(14, 9))
plt.scatter(allVolatility, allReturns, c=allSharpeValues, cmap="plasma")
plt.colorbar(label="Sharpe Ratio")
plt.xlabel("Volatility")
plt.ylabel("Expected Return")
plt.scatter(x_sharpe, y_sharpe, c="black")
plt.scatter(x_ret, y_ret)
st.pyplot(fig)
optim_dic = []
for i in range(len(tickers)):
optim_dic.append({"ticker": tickers[i], "Weight": weightSharpe[i] * 100})
fin = pd.DataFrame(optim_dic)
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
# Need options to upload one to three files, so handle the cases where some uploads are None
# Merge datasets on the sch line item using whichever merge is needed; it should probably be efficient
# Highlight yellow if status is "Under Review with CSAM", or if action SDM is "Not order created on tool" / "Not processed on LTSI tool"
import pandas as pd
from datetime import datetime, timedelta
import io
import streamlit as st
import numpy as np
# ideas
# join all the columns then merge?
def app():
# st.set_page_config(page_title='LTSI Feedback Form')
st.write("""
# LTSI Feedback
### Instructions: \n
- Used when SDM feedback is in seperate files
- Upload excel feedback files (up to 3)
- Upload Open Orders File """)
st.write("## Upload 1 to 3 Feedback Files")
feedback1 = st.file_uploader("Upload Feedback File 1", type="xlsx")
feedback2 = st.file_uploader("Upload Feedback File 2", type="xlsx")
feedback3 = st.file_uploader("Upload Feedback File 3", type="xlsx")
st.write("## Upload Open Orders File")
open_orders = st.file_uploader("Upload Open Order File if feedback does not contain all open order rows",
type="xlsx")
if st.button("Create Feedback"):
def download_file(merged):
action_sdm = merged.columns[34]
merged[action_sdm] = merged[action_sdm].str.lower()
merged[action_sdm] = merged[action_sdm].fillna("0")
merged['Status (SS)'] = np.where(merged[action_sdm].str.contains('cancel', regex=False),
'To be cancelled / reduced', merged['Status (SS)'])
merged['Status (SS)'] = np.where(merged[action_sdm].str.contains('block', regex=False),
'Blocked', merged['Status (SS)'])
merged[action_sdm] = merged[action_sdm].astype(str)
merged[action_sdm].replace(['0', '0.0'], '', inplace=True)
# Writing df to Excel Sheet
buffer = io.BytesIO()
with pd.ExcelWriter(buffer, engine='xlsxwriter') as writer:
merged.to_excel(writer, sheet_name='Sheet1', index=False)
workbook = writer.book
worksheet = writer.sheets['Sheet1']
formatdict = {'num_format': 'dd/mm/yyyy'}
fmt = workbook.add_format(formatdict)
worksheet.set_column('K:K', None, fmt)
worksheet.set_column('L:L', None, fmt)
# Light yellow fill with dark yellow text.
number_rows = len(merged.index) + 1
yellow_format = workbook.add_format({'bg_color': '#FFEB9C'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Under Review with CSAM"',
'format': yellow_format})
grey_format = workbook.add_format({'bg_color': '#C0C0C0'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="To be cancelled / reduced"',
'format': grey_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Under Review with C-SAM"',
'format': yellow_format})
red_format = workbook.add_format({'bg_color': '#ffc7ce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Blocked"',
'format': red_format})
green_format = workbook.add_format({'bg_color': '#c6efce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Shippable"',
'format': green_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AG2="Scheduled Out"',
'format': green_format})
# COL MIGHT BE AH
grey_format = workbook.add_format({'bg_color': '#C0C0C0'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="To be cancelled / reduced"',
'format': grey_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Under Review with CSAM"',
'format': yellow_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Under Review with C-SAM"',
'format': yellow_format})
red_format = workbook.add_format({'bg_color': '#ffc7ce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Blocked"',
'format': red_format})
green_format = workbook.add_format({'bg_color': '#c6efce'})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Shippable"',
'format': green_format})
worksheet.conditional_format('A2:AH%d' % (number_rows),
{'type': 'formula',
'criteria': '=$AH2="Scheduled Out"',
'format': green_format})
for column in merged:
column_width = max(merged[column].astype(str).map(len).max(), len(column))
col_idx = merged.columns.get_loc(column)
writer.sheets['Sheet1'].set_column(col_idx, col_idx, column_width)
worksheet.autofilter(0, 0, merged.shape[0], merged.shape[1])
worksheet.set_column(11, 12, 20)
worksheet.set_column(12, 13, 20)
worksheet.set_column(13, 14, 20)
header_format = workbook.add_format({'bold': True,
'bottom': 2,
'bg_color': '#0AB2F7'})
# Write the column headers with the defined format.
for col_num, value in enumerate(merged.columns.values):
worksheet.write(0, col_num, value, header_format)
my_format = workbook.add_format()
my_format.set_align('left')
worksheet.set_column('N:N', None, my_format)
writer.save()
today = datetime.today()
d1 = today.strftime("%d/%m/%Y")
st.write("Download Completed File:")
st.download_button(
label="Download Excel worksheets",
data=buffer,
file_name="LTSI_file_" + d1 + ".xlsx",
mime="application/vnd.ms-excel"
)
def columns_to_keep():
cols = ['sales_org', 'country', 'cust_num', 'customer_name', 'sales_dis', 'rtm', 'sd_line_item',
'order_method', 'del_blk', 'cust_req_date', 'ord_entry_date',
'cust_po_num', 'ship_num', 'ship_cust', 'ship_city', 'plant',
'material_num', 'brand', 'lob', 'project_code', 'material_desc',
'mpn_desc', 'ord_qty', 'shpd_qty', 'delivery_qty', 'remaining_qty',
'delivery_priority', 'opt_delivery_qt', 'rem_mod_opt_qt',
'sch_line_blocked_for_delv']
return cols
def old_feedback_getter(df):
cols = [8]
col_count = 37
if df.shape[1] >= 39:
while col_count < df.shape[1]:
cols.append(col_count)
col_count += 1
return df.iloc[:, cols]
def new_feedback_getter(df):
return df.iloc[:, [8, 34, 35, 36]]
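# Positional columns assumed by the two helpers above, judging by the merges performed later:
# column 8 appears to be "Sales Order and Line Item" (the merge key), columns 34-36 hold the
# current SDM feedback fields, and old_feedback_getter additionally keeps any columns from 37 on.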
def open_new_feedback_merge(open, new_feedback):
return open.merge(new_feedback, how="left", on="Sales Order and Line Item")
def case2(feedback, open_orders):
feed1 = pd.read_excel(feedback, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback = old_feedback_getter(feed1)
new_feedback = new_feedback_getter(feed1)
open = openOrders.iloc[:, :34]
combined_feedback = open.merge(new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case3(feedback1, feedback2, open_orders):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback1 = old_feedback_getter(feed1)
new_feedback1 = new_feedback_getter(feed1)
old_feedback2 = old_feedback_getter(feed2)
new_feedback2 = new_feedback_getter(feed2)
open = openOrders.iloc[:, :34]
joined_new_feedback = pd.concat([new_feedback1, new_feedback2], ignore_index=True)
joined_old_feedback = pd.concat([old_feedback1, old_feedback2], ignore_index=True)
combined_feedback = open.merge(joined_new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(joined_old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case4(feedback1, feedback2, feedback3, open_orders):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
feed3 = pd.read_excel(feedback3, sheet_name=0, engine="openpyxl")
openOrders = pd.read_excel(open_orders, sheet_name=0, engine="openpyxl")
old_feedback1 = old_feedback_getter(feed1)
new_feedback1 = new_feedback_getter(feed1)
old_feedback2 = old_feedback_getter(feed2)
new_feedback2 = new_feedback_getter(feed2)
old_feedback3 = old_feedback_getter(feed3)
new_feedback3 = new_feedback_getter(feed3)
open = openOrders.iloc[:, :34]
joined_new_feedback = pd.concat([new_feedback1, new_feedback2, new_feedback3], ignore_index=True)
joined_old_feedback = pd.concat([old_feedback1, old_feedback2, old_feedback3], ignore_index=True)
combined_feedback = open.merge(joined_new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(joined_old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case5(feedback1, feedback2):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
open = feed1.iloc[:, :34]
old_feedback = old_feedback_getter(feed1)
# drop na
new_feedback1 = new_feedback_getter(feed1)
new_feedback2 = new_feedback_getter(feed2)
new_feedback1 = new_feedback1[new_feedback1.iloc[:, 1].notna()]
new_feedback2 = new_feedback2[new_feedback2.iloc[:, 1].notna()]
joined_new_feedback = pd.concat([new_feedback1, new_feedback2], ignore_index=True)
combined_feedback = open.merge(joined_new_feedback, how="left", on="Sales Order and Line Item")
final = combined_feedback.merge(old_feedback, how="left", on="Sales Order and Line Item")
cols = columns_to_keep()
final.drop_duplicates(subset=cols, keep='first', inplace=True)
download_file(final)
def case6(feedback1, feedback2, feedback3):
feed1 = pd.read_excel(feedback1, sheet_name=0, engine="openpyxl")
feed2 = pd.read_excel(feedback2, sheet_name=0, engine="openpyxl")
feed3 = pd.read_excel(feedback3, sheet_name=0, engine="openpyxl")
import numpy as np
import scipy.stats as stat
import joblib
import pickle
import pandas as pd
def map_workclass(df):
workclass_mapper = {
' State-gov': "other",
' Self-emp-not-inc': "other",
' Federal-gov': "other",
' Local-gov': "other",
' ?': "other",
' Self-emp-inc': "other",
' Without-pay': "other",
' Never-worked': "other"
}
df["workclass"] = df.workclass.map(workclass_mapper).fillna(df["workclass"])
return df
def map_education(df):
education_mapper ={' 11th': 'other',
' Masters': 'other',
' 9th': 'other',
' Assoc-acdm': 'other',
' Assoc-voc': 'other',
' 7th-8th': 'other',
' Doctorate': 'other',
' Prof-school': 'other',
' 5th-6th': 'other',
' 10th': 'other',
' 1st-4th': 'other',
' Preschool': 'other',
' 12th': 'other'}
df['education'] = df['education'].map(education_mapper).fillna(df['education'])
return df
def map_marital_status(df):
marital_status_mapper = {' Divorced': 'other',
' Married-spouse-absent': 'other',
' Separated': 'other',
' Married-AF-spouse': 'other',
' Widowed': 'other'}
df['marital-status'] = df['marital-status'].map(marital_status_mapper).fillna(df['marital-status'])
return df
def map_occupation(df):
occupation_mapper = {
' ?': 'Prof-specialty',
' Protective-serv': 'other',
' Armed-Forces': 'other',
' Priv-house-serv': 'other',
' Tech-support': 'other',
' Farming-fishing': 'other',
' Handlers-cleaners': 'other'
}
df['occupation'] = df['occupation'].map(occupation_mapper).fillna(df["occupation"])
return df
def encode_categories(data):
encoder_file="transformers/baseN_encoder.pkl"
df = pd.DataFrame(data)
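# The body of encode_categories is cut off in the source. A minimal sketch of how it likely
# continues, assuming baseN_encoder.pkl holds a fitted category_encoders BaseNEncoder (an
# assumption, not confirmed by the source):
#   with open(encoder_file, "rb") as f:
#       encoder = pickle.load(f)
#   return encoder.transform(df)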
import numpy as np
import pandas as pd
import pickle
filename = "loading_data/load.csv"
# Loading file using simple editors
cols = None
data = []
with open(filename) as f:
for line in f.readlines():
vals = line.replace("\n","").split(",")
print(vals)
if cols is None:
cols = vals
else:
data.append([float(x) for x in vals])
df = pd.DataFrame(data, columns=cols)
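# For reference, the manual parsing loop above is equivalent to a single pandas call
# (assuming load.csv has one header row and purely numeric columns):
#   df = pd.read_csv(filename)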
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
@version:
@author: zzh
@file: factor_earning_expectation.py
@time: 2019-9-19
"""
import pandas as pd
class FactorEarningExpectation():
"""
盈利预期
"""
def __init__(self):
__str__ = 'factor_earning_expectation'
self.name = '盈利预测'
self.factor_type1 = '盈利预测'
self.factor_type2 = '盈利预测'
self.description = '个股盈利预测因子'
@staticmethod
def NPFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy1']):
"""
:name: 一致预期净利润(FY1)
:desc: 一致预期净利润的未来第一年度的预测
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy1': 'NPFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['net_profit_fy2']):
"""
:name: 一致预期净利润(FY2)
:desc: 一致预期净利润的未来第二年度的预测
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'net_profit_fy2': 'NPFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy1']):
"""
:name: 一致预期每股收益(FY1)
:desc: 一致预期每股收益未来第一年度的预测均值
:unit: 元
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy1': 'EPSFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['eps_fy2']):
"""
:name: 一致预期每股收益(FY2)
:desc: 一致预期每股收益未来第二年度的预测均值
:unit: 元
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'eps_fy2': 'EPSFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy1']):
"""
:name: 一致预期营业收入(FY1)
:desc: 一致预期营业收入未来第一年度的预测均值
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy1': 'OptIncFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['operating_revenue_fy2']):
"""
:name: 一致预期营业收入(FY2)
:desc: 一致预期营业收入未来第二年度的预测均值
:unit: 元
:view_dimension: 10000
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'operating_revenue_fy2': 'OptIncFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy1']):
"""
:name: 一致预期市盈率(PE)(FY1)
:desc: 一致预期市盈率未来第一年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy1': 'CEPEFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pe_fy2']):
"""
:name: 一致预期市盈率(PE)(FY2)
:desc: 一致预期市盈率未来第二年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pe_fy2': 'CEPEFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy1']):
"""
:name: 一致预期市净率(PB)(FY1)
:desc: 一致预期市净率未来第一年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy1': 'CEPBFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPBFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['pb_fy2']):
"""
:name: 一致预期市净率(PB)(FY2)
:desc: 一致预期市净率未来第二年度的预测均值
:unit: 倍
:view_dimension: 1
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'pb_fy2': 'CEPBFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY1(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy1']):
"""
:name: 市盈率相对盈利增长比率(FY1)
:desc: 未来第一年度市盈率相对盈利增长比率
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy1': 'CEPEGFY1'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def CEPEGFY2(tp_earning, factor_earning_expect, trade_date, dependencies=['peg_fy2']):
"""
:name: 市盈率相对盈利增长比率(FY2)
:desc: 未来第二年度市盈率相对盈利增长比率
:unit:
:view_dimension: 0.01
"""
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, dependencies]
earning_expect.rename(columns={'peg_fy2': 'CEPEGFY2'}, inplace=True)
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def _change_rate(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y']) / \
earning_expect[colunm + '_y']
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
@staticmethod
def _change_value(tp_earning, trade_date, pre_trade_date, colunm, factor_name):
earning_expect = tp_earning[tp_earning['publish_date'] == trade_date].loc[:, colunm]
earning_expect_pre = tp_earning[tp_earning['publish_date'] == pre_trade_date].loc[:, colunm]
earning_expect = pd.merge(earning_expect, earning_expect_pre, on='security_code', how='left')
earning_expect[factor_name] = (earning_expect[colunm + '_x'] - earning_expect[colunm + '_y'])
earning_expect.drop(columns=[colunm + '_x', colunm + '_y'], inplace=True)
return earning_expect
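# The two helpers above compare the consensus value published on trade_date (x) with the one
# published on the earlier pre_trade_date (y), per security_code:
#   _change_rate  returns (x - y) / y
#   _change_value returns  x - y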
@staticmethod
def NPFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_一周
:desc: 未来第一年度一致预测净利润一周内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_一月
:desc: 未来第一年度一致预测净利润一月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_三月
:desc: 未来第一年度一致预测净利润三月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化率_六月
:desc: 未来第一年度一致预测净利润六月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_一周
:desc: 未来第一年度一致预测每股收益一周内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_一月
:desc: 未来第一年度一致预测每股收益一月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_三月
:desc: 未来第一年度一致预测每股收益三月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化_六月
:desc: 未来第一年度一致预测每股收益六月内预测值变化
:unit: 元
:view_dimension: 1
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_一周
:desc: 未来第一年度一致预测每股收益一周内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'eps_fy1',
'EPSFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_一月
:desc: 未来第一年度一致预测每股收益一月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[2],
'eps_fy1',
'EPSFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY13MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_三月
:desc: 未来第一年度一致预测每股收益三月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[3],
'eps_fy1',
'EPSFY13MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def EPSFY16MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY1)变化率_六月
:desc: 未来第一年度一致预测每股收益六月内预测值变化率
:unit:
:view_dimension: 0.01
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_rate(tp_earning, trade_date, trade_dates[4],
'eps_fy1',
'EPSFY16MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11WChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_一周
:desc: 未来第一年度一致预测净利润一周内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'net_profit_fy1',
'NPFY11WChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY11MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_一月
:desc: 未来第一年度一致预测净利润一月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'net_profit_fy1',
'NPFY11MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY13MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_三月
:desc: 未来第一年度一致预测净利润三月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 4:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[3],
'net_profit_fy1',
'NPFY13MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def NPFY16MChg(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY1)变化_六月
:desc: 未来第一年度一致预测净利润六月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 5:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[4],
'net_profit_fy1',
'NPFY16MChg')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def ChgNPFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测净利润(FY2)与一致预期净利润(FY1)的变化率
:desc: 未来第二年度一致预测净利润与未来第一年度一致预测净利润变化率
:unit:
:view_dimension: 0.01
"""
factor_earning_expect['ChgNPFY1FY2'] = (factor_earning_expect['NPFY2'] - factor_earning_expect['NPFY1']) / abs(
factor_earning_expect['NPFY1']) * 100
return factor_earning_expect
@staticmethod
def ChgEPSFY1FY2(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测每股收益(FY2)与一致预期每股收益(FY1)的变化率
:desc: 未来第二年度一致预测每股收益与未来第一年度一致预测每股收益变化率
:unit:
:view_dimension: 0.01
"""
factor_earning_expect['ChgEPSFY1FY2'] = (factor_earning_expect['EPSFY2'] - factor_earning_expect['EPSFY1']) / abs(
factor_earning_expect['EPSFY1']) * 100
return factor_earning_expect
@staticmethod
def OptIncFY11WRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_一周
:desc: 未来第一年度一致预测营业收入一周内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 2:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[1],
'operating_revenue_fy1',
'OptIncFY11WRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
@staticmethod
def OptIncFY11MRT(tp_earning, factor_earning_expect, trade_date):
"""
:name: 一致预测营业收入(FY1)变化_一月
:desc: 未来第一年度一致预测营业收入一月内预测值变化
:unit: 元
:view_dimension: 10000
"""
trade_dates = sorted(set(tp_earning['publish_date']), reverse=True)
if len(trade_dates) >= 3:
earning_expect = FactorEarningExpectation._change_value(tp_earning, trade_date, trade_dates[2],
'operating_revenue_fy1',
'OptIncFY11MRT')
factor_earning_expect = pd.merge(factor_earning_expect, earning_expect, on='security_code')
return factor_earning_expect
from yahooquery import Ticker
import streamlit as st
import warnings
import pandas as pd
import numpy as np
import yfinance as yf
from itertools import product
import matplotlib.pyplot as plt
import os
# from src.tools.functions import company_longName
warnings.filterwarnings("ignore")
plt.style.use("seaborn-poster")
sm, med, lg = 10, 15, 25
plt.rc("font", size=sm) # controls default text sizes
plt.rc("axes", labelsize=med) # fontsize of the x & y labels
plt.rc("axes", titlesize=med) # fontsize of the axes title
plt.rc("xtick", labelsize=sm) # fontsize of the tick labels
plt.rc("ytick", labelsize=sm) # fontsize of the tick labels
plt.rc("legend", fontsize=med) # legend fontsize
plt.rc("figure", titlesize=lg) # fontsize of the figure title
plt.rc("axes", linewidth=2) # linewidth of plot lines
plt.rcParams["legend.fontsize"] = "medium"
legend_properties = {"weight": "bold"}
plt.rcParams["figure.figsize"] = [13, 6.5]
plt.rcParams["figure.dpi"] = 100
plt.rcParams["legend.shadow"] = True
plt.rcParams["legend.borderpad"] = 0.9
plt.rcParams["legend.framealpha"] = 0.1
plt.rcParams["axes.facecolor"] = "white"
plt.rcParams["axes.edgecolor"] = "black"
plt.rcParams["legend.loc"] = "upper left"
plt.rcParams["legend.frameon"] = True
plt.rcParams["legend.fancybox"] = True
pd.set_option("display.max_rows", 25)
os.environ["NUMEXPR_MAX_THREADS"] = "24"
os.environ["NUMEXPR_NUM_THREADS"] = "12"
class Optimal_Double_Mavg_Crossover(object):
def __init__(self, tic, sName):
self.tic = tic
self.sName = sName
def grab_data(self, hist_per):
ticker = yf.Ticker(self.tic)
self.raw = ticker.history(period=hist_per)
self.raw.columns = ["Open", "High", "Low", self.sName, "Volume", "Dividends", "Stock Splits", ]
SMA1 = 2
SMA2 = 5
data1 = pd.DataFrame(self.raw[self.sName])
data1.columns = [self.sName]
data1["SMA1"] = data1[self.sName].rolling(SMA1).mean()
data1["SMA2"] = data1[self.sName].rolling(SMA2).mean()
data1["Position"] = np.where(data1["SMA1"] > data1["SMA2"], 1, -1)
data1["Returns"] = np.log(data1[self.sName] / data1[self.sName].shift(1))
data1["Strategy"] = data1["Position"].shift(1) * data1["Returns"]
data1.round(4).tail()
data1.dropna(inplace=True)
np.exp(data1[["Returns", "Strategy"]].sum())
np.exp(data1[["Returns", "Strategy"]].std() * 252 ** 0.5)
sma1 = range(2, 76, 2)
sma2 = range(5, 202, 5)
results = pd.DataFrame()
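# The grid search over the SMA windows is truncated in the source. Given the `product` import
# and the sma1/sma2 ranges above, it presumably continues roughly like this (sketch only):
#   grid = []
#   for SMA1, SMA2 in product(sma1, sma2):
#       d = pd.DataFrame(self.raw[self.sName])
#       d["Returns"] = np.log(d[self.sName] / d[self.sName].shift(1))
#       d["SMA1"] = d[self.sName].rolling(SMA1).mean()
#       d["SMA2"] = d[self.sName].rolling(SMA2).mean()
#       d["Position"] = np.where(d["SMA1"] > d["SMA2"], 1, -1)
#       d["Strategy"] = d["Position"].shift(1) * d["Returns"]
#       perf = np.exp(d[["Returns", "Strategy"]].dropna().sum())
#       grid.append({"SMA1": SMA1, "SMA2": SMA2, "MARKET": perf["Returns"], "STRATEGY": perf["Strategy"]})
#   results = pd.DataFrame(grid)
# with the best window pair read off results sorted by the STRATEGY column.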
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 13 12:29:01 2017
@author: Jones
"""
import pandas as pd
base = pd.read_csv('credit_data.csv')
base.describe()
base.loc[base['age'] < 0]
# apagar a coluna
base.drop('age', 1, inplace=True)
# apagar somente os registros com problema
base.drop(base[base.age < 0].index, inplace=True)
# preencher os valores manualmente
# preencher os valores com a média
base.mean()
base['age'].mean()
base['age'][base.age > 0].mean()
base.loc[base.age < 0, 'age'] = 40.92
pd.isnull(base['age'])
base.loc[pd.isnull(base['age'])]
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import time
import warnings
warnings.filterwarnings('ignore')
import pandas as pd, numpy as np
import math, json, gc, random, os, sys
import torch
import logging
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
from sklearn.model_selection import train_test_split
from catalyst.dl import SupervisedRunner
from catalyst.contrib.dl.callbacks import WandbLogger
from contextlib import contextmanager
from catalyst.dl.callbacks import AccuracyCallback, F1ScoreCallback, OptimizerCallback
#from pytorch_memlab import profile, MemReporter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# In[2]:
def set_seed(seed: int):
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed) # type: ignore
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = True # type: ignore
# In[3]:
set_seed(2020)
# In[4]:
test = pd.read_json('/kaggle/input/stanford-covid-vaccine/test.json', lines=True)
samplesub= pd.read_csv('/kaggle/input/stanford-covid-vaccine/sample_submission.csv')
# In[5]:
bpp_max=[]
bpp_mean =[]
id = test.id.values
for i in id:
probability = np.load('../input/stanford-covid-vaccine'+'/bpps/%s.npy'%i)
bpp_max.append(probability.max(-1).tolist())
bpp_mean.append(probability.mean(-1).tolist())
test['bpp_max']=bpp_max
test['bpp_mean']=bpp_mean
# In[6]:
test_public=test[test['seq_length']==107]
test_private=test[test['seq_length']==130]
# In[7]:
test_public_x=test_public.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
test_private_x=test_private.loc[:,['id','sequence','structure','predicted_loop_type','bpp_max','bpp_mean']]
# The private data does not fit in CUDA memory, so split it to reduce its size.
test_private_x1,test_private_x2=train_test_split(test_private_x,test_size=0.5)
# In[8]:
token2int = {x:i for i, x in enumerate('().<KEY>')}
def preprocess_inputs_public(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_public_x['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_public_x['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private1(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x1['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x1['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
def preprocess_inputs_private2(df, cols=['sequence', 'structure', 'predicted_loop_type']):
base_fea= np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
bpps_max_fea = np.array(test_private_x2['bpp_max'].to_list())[:,:,np.newaxis]
bpps_mean_fea = np.array(test_private_x2['bpp_mean'].to_list())[:,:,np.newaxis]
return np.concatenate([base_fea,bpps_max_fea,bpps_mean_fea], 2)
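# Added aside: the three preprocess_inputs_* functions above differ only in which
# dataframe supplies the bpp features; a single generic helper (a sketch, not in
# the original notebook) avoids relying on the global test_*_x frames.
def preprocess_inputs_generic(df, cols=['sequence', 'structure', 'predicted_loop_type']):
    base_fea = np.transpose(
        np.array(df[cols].applymap(lambda seq: [token2int[x] for x in seq]).values.tolist()),
        (0, 2, 1)
    )
    bpps_max_fea = np.array(df['bpp_max'].to_list())[:, :, np.newaxis]
    bpps_mean_fea = np.array(df['bpp_mean'].to_list())[:, :, np.newaxis]
    return np.concatenate([base_fea, bpps_max_fea, bpps_mean_fea], 2)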
# In[9]:
test_public_inputs = torch.from_numpy(preprocess_inputs_public(test_public_x)).to(device).float()
test_private_inputs1 = torch.from_numpy(preprocess_inputs_private1(test_private_x1)).to(device).float()
test_private_inputs2 = torch.from_numpy(preprocess_inputs_private2(test_private_x2)).to(device).float()
# In[10]:
#print('train_inputs:{}\nval_inputs:{}\ntrain_labels:{}\nval_labels:{}'.format(train_inputs.shape,val_inputs.shape,train_outputs.shape,val_outputs.shape))
# In[11]:
class LSTM_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(LSTM_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.lstm = nn.LSTM(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.lstm(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
# In[12]:
class GRU_model(nn.Module):
def __init__(
self, seq_len=107, pred_len=68, dropout=0.5, embed_dim=100, hidden_dim=1024, hidden_layers=2
):
super(GRU_model, self).__init__()
self.pred_len = pred_len
self.embeding = nn.Embedding(num_embeddings=len(token2int), embedding_dim=embed_dim)
self.gru = nn.GRU(
input_size=embed_dim * 3+2,
hidden_size=hidden_dim,
num_layers=hidden_layers,
dropout=dropout,
bidirectional=True,
batch_first=True,
)
self.linear = nn.Linear(hidden_dim * 2, 5)
def forward(self, seqs):
embed = self.embeding(seqs[:,:,0:3].long())
reshaped = torch.reshape(embed, (-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped= torch.cat((reshaped,seqs[:,:,3:5]),2)
output, hidden = self.gru(reshaped)
truncated = output[:, : self.pred_len, :]
out = self.linear(truncated)
return out
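# Added sanity check (not in the original notebook): both models map
# (batch, seq_len, 5) inputs to (batch, pred_len, 5) outputs.
if __name__ == "__main__":
    dummy = torch.zeros(2, 107, 5)
    print(GRU_model()(dummy).shape)  # expected: torch.Size([2, 68, 5])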
# In[13]:
LSTM_weights_path='../input/weight11/LSTM_ver20.pth'
def get_LSTM_model(seq_len=107, pred_len=68):
model = LSTM_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(LSTM_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[14]:
GRU_weights_path='../input/weight11/GRU_ver8'
def get_GRU_model(seq_len=107, pred_len=68):
model = GRU_model(seq_len=seq_len, pred_len=pred_len)
checkpoint = torch.load(GRU_weights_path)
model.load_state_dict(checkpoint["model_state_dict"])
device = torch.device("cuda")
model.to(device)
model.eval()
return model
# In[15]:
with torch.no_grad():
model =get_LSTM_model()
prediction=model(test_public_inputs)
result_public_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_LSTM_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_LSTM=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[16]:
with torch.no_grad():
model =get_GRU_model()
prediction=model(test_public_inputs)
result_public_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs1)
result_private1_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
with torch.no_grad():
model =get_GRU_model(seq_len=130, pred_len=91)
prediction=model(test_private_inputs2)
result_private2_GRU=prediction.to('cpu').detach().numpy().copy()
del prediction
# In[17]:
df0 = pd.DataFrame(index=range(39), columns=['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',])
df0=df0.fillna(0)
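# Added note: the models only score the first 68 (public, seq_length 107) or
# 91 (private, seq_length 130) positions, so df0 supplies the 39 zero-filled
# rows used below to pad every prediction to the full sequence length.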
# In[18]:
test_public_id=test_public['id']
idlist_public=test_public_id.values.tolist()
# In[19]:
test_private_id1=test_private_x1['id']
idlist_private1=test_private_id1.values.tolist()
idlist_private1[-5:]
# In[20]:
test_private_id2=test_private_x2['id']
idlist_private2=test_private_id2.values.tolist()
idlist_private2[:5]
# In[21]:
# Forcibly sort so the rows match the sample submission order
testindex=samplesub.loc[:,['id_seqpos']]
testindex=testindex.reset_index()
# In[22]:
df1 = pd.DataFrame(result_public_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_LSTM)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_LSTM)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_LSTM[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private2[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private2_LSTM)-1):
id = idlist_private2[j+1]
df2 = pd.DataFrame(result_private2_LSTM[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe2=df1
# In[23]:
merged_dataframe=pd.concat([public_dataframe,private_dataframe1,private_dataframe2])
pre_submission_LSTM=pd.merge(testindex,merged_dataframe)
# In[24]:
pre_submission_LSTM
# In[25]:
df1 = pd.DataFrame(result_public_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_public[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_public_GRU)-1):
id = idlist_public[j+1]
df2 = pd.DataFrame(result_public_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
public_dataframe=df1
df1 = pd.DataFrame(result_private1_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
id=idlist_private1[0]
for i in range(len(df1)):
df1.iloc[i,0]=id+'_{}'.format(i)
for j in range (len(result_private1_GRU)-1):
id = idlist_private1[j+1]
df2 = pd.DataFrame(result_private1_GRU[j+1])
df2.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df2.insert(0, 'id_seqpos', 0)
df2=pd.concat([df2,df0])
for i in range(len(df2)):
df2.iloc[i,0]=id+'_{}'.format(i)
df1=pd.concat([df1,df2])
private_dataframe1=df1
df1 = pd.DataFrame(result_private2_GRU[0])
df1.columns = ['reactivity', 'deg_Mg_pH10', 'deg_pH10','deg_Mg_50C','deg_50C',]
df1.insert(0, 'id_seqpos', 0)
df1=pd.concat([df1,df0])
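# Added refactor sketch (not in the original notebook): the repeated blocks above
# can be produced by one helper that turns a model's output array into the long
# per-position frame, padding with df0 exactly as done above.
def results_to_frame(result_array, idlist):
    value_cols = ['reactivity', 'deg_Mg_pH10', 'deg_pH10', 'deg_Mg_50C', 'deg_50C']
    frames = []
    for res, seq_id in zip(result_array, idlist):
        df_seq = pd.DataFrame(res, columns=value_cols)
        df_seq = pd.concat([df_seq, df0])  # pad to the full sequence length
        df_seq.insert(0, 'id_seqpos', ['{}_{}'.format(seq_id, i) for i in range(len(df_seq))])
        frames.append(df_seq)
    return pd.concat(frames)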
"""
Contents:
_get_lithium_EW_df
get_Randich18_NGC2516
get_GalahDR3_lithium
get_GalahDR3_li_EWs
get_Bouvier18_pleiades_li_EWs
"""
import os
import numpy as np, pandas as pd
from numpy import array as nparr
from astropy.io import fits
from astropy.table import Table
from astropy import units as u, constants as const
from astropy.coordinates import SkyCoord
from earhart.paths import DATADIR, RESULTSDIR
def _get_lithium_EW_df(gaiaeso, galahdr3, EW_CUTOFF_mA=-99,
use_my_GaiaESO_EWs=1):
    # gaiaeso and galahdr3: booleans for whether to include the Gaia-ESO and GALAH DR3 lithium EWs, respectively
#
# Randich+18 measurements from the Gaia-ESO spectra
#
datapath = os.path.join(DATADIR, 'lithium',
'randich_fullfaintkinematic_xmatch_20210310.csv')
if not os.path.exists(datapath):
from earhart.lithium import _make_Randich18_xmatch
_make_Randich18_xmatch(datapath, vs_rotators=0)
gaiaeso_df = pd.read_csv(datapath)
gaiaeso_li_col = 'EWLi'
gaiaeso_li_errcol = 'e_EWLi'
# somewhat surprisingly, the kinematic X Randich+18 NGC2516 sample has no
# upper limits (and 477 reported detections).
assert len(np.unique(gaiaeso_df.f_EWLi)) == 1
from earhart.lithium import get_GalahDR3_li_EWs
galah_df = get_GalahDR3_li_EWs()
# the gaussian fit, numerically integrated
my_li_col = 'Fitted_Li_EW_mA'
s_gaiaeso_df = gaiaeso_df[[gaiaeso_li_col, gaiaeso_li_errcol, 'source_id']]
s_gaiaeso_df = s_gaiaeso_df.rename({gaiaeso_li_col: 'Li_EW_mA',
gaiaeso_li_errcol:'Li_EW_mA_perr'}, axis='columns')
s_gaiaeso_df['Li_EW_mA_merr'] = s_gaiaeso_df['Li_EW_mA_perr']
#s_gaiaeso_df['Li_provenance'] = 'Randich+18'
s_galah_df = galah_df[[my_li_col, my_li_col+"_perr",
my_li_col+"_merr", 'source_id']]
s_galah_df = s_galah_df.rename({my_li_col: 'Li_EW_mA',
my_li_col+"_perr": 'Li_EW_mA_perr',
my_li_col+"_merr": 'Li_EW_mA_merr'
}, axis='columns')
#s_galah_df['Li_provenance'] = 'GALAHDR3+ThisWork'
#
    # My measurements from the Gaia-ESO spectra
#
mydatapath = os.path.join(DATADIR, 'lithium',
'gaiaesodr4_fullfaintkinematic_xmatch_20210421.csv')
my_gaiaeso_df = pd.read_csv(mydatapath)
my_gaiaeso_df = my_gaiaeso_df.rename({my_li_col: 'Li_EW_mA',
my_li_col+"_perr": 'Li_EW_mA_perr',
my_li_col+"_merr": 'Li_EW_mA_merr'
}, axis='columns')
smy_gaiaeso_df = my_gaiaeso_df[[
'Li_EW_mA', 'Li_EW_mA_perr', 'source_id', 'Li_EW_mA_merr'
]]
if use_my_GaiaESO_EWs:
s_gaiaeso_df = smy_gaiaeso_df
if gaiaeso and galahdr3:
df = pd.concat((s_gaiaeso_df, s_galah_df))
df = df[df.Li_EW_mA > EW_CUTOFF_mA]
print(f'Gaia-ESO + GALAH: got {len(df[~pd.isnull(df.Li_EW_mA)])} finite Li EWs > {EW_CUTOFF_mA} mA')
if gaiaeso and not galahdr3:
df = s_gaiaeso_df
df = df[df.Li_EW_mA > EW_CUTOFF_mA]
print(f'Gaia-ESO: got {len(df[~pd.isnull(df.Li_EW_mA)])} finite Li EWs > {EW_CUTOFF_mA} mA')
if not gaiaeso and galahdr3:
df = s_galah_df
df = df[df.Li_EW_mA > EW_CUTOFF_mA]
print(f'GALAH: got {len(df[~pd.isnull(df.Li_EW_mA)])} finite Li EWs > {EW_CUTOFF_mA} mA')
return df[df.Li_EW_mA > EW_CUTOFF_mA]
def get_GalahDR3_li_EWs(verbose=1):
"""
Made by drivers.measure_galah_dr3_Li_EWs.py
Target spectra are from the crossmatch of "fullfaint" (DR2) with the GALAH
target list.
"""
if verbose:
print("WRN! These GALAH DR3 EWs are janky for <50mA b/c of S/N.")
galah_li_path = os.path.join(
DATADIR, 'lithium', 'galahdr3_fullfaintkinematic_xmatch_20210310.csv'
)
li_df = pd.read_csv(galah_li_path)
return li_df
def get_GalahDR3_lithium(verbose=1, defaultflags=0):
"""
Get astropy table of stellar parameter flag == 0 stars from GALAH DR3, with
lithium detections or upper limits.
"""
# downloaded via wget, per
# https://github.com/svenbuder/GALAH_DR3/blob/master/tutorials/tutorial1_dr3_main_catalog_overview.ipynb
dr3path = os.path.join(DATADIR, 'lithium', 'GALAH_DR3_main_allstar_v1.fits')
dr3_tab = Table.read(dr3path)
if defaultflags:
# stellar parameter flag
qual = np.array(dr3_tab['flag_sp'])
binary_repr_vec = np.vectorize(np.binary_repr)
qual_binary = binary_repr_vec(qual, width=11)
# 11 total flag bits:
# 2^0 = 1: Gaia DR2 RUWE > 1.4
# 2^1 = 2: unreliable broadening
# 2^2 = 4: low S/N
# 2^3 = 8: reduction issues (wvlen soln, t-SNE reduction issues,
# weird fluxes, spikes, etc.
# 4 = 16: t-SNE projected emission features
# 5 = 32: t-SNE projected binaries
# 6 = 64: on binary sequence / PMS sequence
# 7 = 128: S/N dependent high SME chi2 (bad fit)
# 8 = 256: problems with Fe: line flux not between 0.03 and 1.00,
# [Fe/H] unreliable, or blending suspects and SME didnt finish
# 9 = 512: SME did not finish. Either a) No convergence -> nonfinite stellar
# parameters. Or b) Gaussian RV fit failed.
# 2^10 = 1024: MARCS grid limit reached or outside reasonable
# parameter range.
# Need reliable broadening, S/N, etc.
# 2^1 = 2: unreliable broadening
# 2^2 = 4: low S/N
# 2^3 = 8: reduction issues (wvlen soln, t-SNE reduction issues,
# weird fluxes, spikes, etc.
# 8 = 256: problems with Fe: line flux not between 0.03 and 1.00,
# [Fe/H] unreliable, or blending suspects and SME didnt finish
# 9 = 512: SME did not finish. Either a) No convergence -> nonfinite stellar
# parameters. Or b) Gaussian RV fit failed.
# 2^10 = 1024: MARCS grid limit reached or outside reasonable
# parameter range.
badbits = [1,2,3]
sel = np.isfinite(dr3_tab['source_id'])
for bb in badbits:
# zero -> one-based count here to convert bitwise flags to
# python flags
sel &= ~(np.array([q[bb-1] for q in qual_binary]).astype(bool))
else:
sel = np.isfinite(dr3_tab['source_id'])
if verbose:
print(f'All stars in GALAH DR3: {len(dr3_tab)}')
if defaultflags:
print(f'All stars in GALAH DR3 with bitflags {repr(badbits)} not set: {len(dr3_tab[sel])}')
return dr3_tab[sel]
def get_Randich18_NGC2516():
hl = fits.open(
os.path.join(DATADIR, 'lithium',
'Randich_2018_NGC2516_all796_entries_vizier.fits')
)
t_df = Table(hl[1].data).to_pandas()
return t_df
def _make_Randich18_xmatch(datapath, vs_rotators=1, RADIUS=0.5):
"""
For every Randich+18 Gaia-ESO star with a spectrum, look for a rotator
match (either the "gold" or "autorot" samples) within RADIUS arcseconds.
If you find it, pull its data. If there are multiple, take the closest.
"""
rdf = get_Randich18_NGC2516()
if vs_rotators:
raise DeprecationWarning
rotdir = os.path.join(DATADIR, 'rotation')
rot_df = pd.read_csv(
os.path.join(rotdir, 'ngc2516_rotation_periods.csv')
)
comp_df = rot_df[rot_df.Tags == 'gold']
print('Comparing vs the "gold" NGC2516 rotators sample (core + halo)...')
else:
from earhart.helpers import _get_fullfaint_dataframes
nbhd_df, core_df, halo_df, full_df, target_df = _get_fullfaint_dataframes()
comp_df = full_df
print(f'Comparing vs the {len(comp_df)} "fullfaint" kinematic NGC2516 rotators sample (core + halo)...')
c_comp = SkyCoord(ra=nparr(comp_df.ra)*u.deg, dec=nparr(comp_df.dec)*u.deg)
c_r18 = SkyCoord(ra=nparr(rdf._RA)*u.deg, dec=nparr(rdf._DE)*u.deg)
cutoff_radius = RADIUS*u.arcsec
has_matchs, match_idxs, match_rows = [], [], []
for ix, _c in enumerate(c_r18):
if ix % 100 == 0:
print(f'{ix}/{len(c_r18)}')
seps = _c.separation(c_comp)
if min(seps.to(u.arcsec)) < cutoff_radius:
has_matchs.append(True)
match_idx = np.argmin(seps)
match_idxs.append(match_idx)
match_rows.append(comp_df.iloc[match_idx])
else:
has_matchs.append(False)
has_matchs = nparr(has_matchs)
left_df = rdf[has_matchs]
    right_df = pd.DataFrame(match_rows)
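    # Added aside (not in the original module): the per-source loop above can be
    # replaced by astropy's vectorised catalogue matching, e.g.
    #   idx, sep2d, _ = c_r18.match_to_catalog_sky(c_comp)
    #   has_match = sep2d < cutoff_radius
    # which returns the nearest rotator and its separation for every Randich+18 star.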
"""
Copyright (C) 2013-2019 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
time.py
~~~~~~~
Functionality to add and process time varying parameters
"""
import xarray as xr
import numpy as np
import pandas as pd
from calliope import exceptions
from calliope.core.attrdict import AttrDict
from calliope.core.util.tools import plugin_load
from calliope.preprocess import checks
from calliope.core.util.dataset import reorganise_xarray_dimensions
def apply_time_clustering(model_data, model_run):
"""
Take a Calliope model_data post time dimension addition, prior to any time
clustering, and apply relevant time clustering/masking techniques.
See doi: 10.1016/j.apenergy.2017.03.051 for applications.
Techniques include:
- Clustering timeseries into a selected number of 'representative' days.
Days with similar profiles and daily magnitude are grouped together and
represented by one 'representative' day with a greater weight per time
step.
- Masking timeseries, leading to variable timestep length
Only certain parts of the input are shown at full resolution, with other
periods being clustered together into a single timestep.
E.g. Keep high resolution in the week with greatest wind power variability,
smooth all other timesteps to 12H
- Timestep resampling
Used to reduce problem size by reducing resolution of all timeseries data.
E.g. resample from 1H to 6H timesteps
Parameters
----------
model_data : xarray Dataset
Preprocessed Calliope model_data, as produced using
`calliope.preprocess.build_model_data`
and found in model._model_data_original
model_run : bool
preprocessed model_run dictionary, as produced by
Calliope.preprocess_model
Returns
-------
data : xarray Dataset
Dataset with optimisation parameters as variables, optimisation sets as
coordinates, and other information in attributes. Time dimension has
been updated as per user-defined clustering techniques (from model_run)
"""
time_config = model_run.model["time"]
data = model_data.copy(deep=True)
##
# Process masking and get list of timesteps to keep at high res
##
if "masks" in time_config:
masks = {}
# time.masks is a list of {'function': .., 'options': ..} dicts
for entry in time_config.masks:
entry = AttrDict(entry)
mask_func = plugin_load(
entry.function, builtin_module="calliope.time.masks"
)
mask_kwargs = entry.get_key("options", default=AttrDict()).as_dict()
masks[entry.to_yaml()] = mask_func(data, **mask_kwargs)
data.attrs["masks"] = masks
# Concatenate the DatetimeIndexes by using dummy Series
chosen_timesteps = pd.concat(
[pd.Series(0, index=m) for m in masks.values()]
).index
# timesteps: a list of timesteps NOT picked by masks
        timesteps = pd.Index(data.timesteps.values).difference(chosen_timesteps)
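        # Added note: the time configuration consumed here follows the shape read
        # above, roughly (keys and values illustrative only, see the Calliope docs):
        #   time:
        #       masks:
        #           - {function: <mask function name>, options: {...}}
        #       function: <clustering or resampling function>
        #       function_options: {...}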
"""
@author: <NAME>
@author: <NAME>
"""
import twint
import pandas as pd
import os
import configobj
config = configobj.ConfigObj('env.b')
path = config['TWEETS_PATH']
ita_path = config['ITA_PATH']
eng_path = config['ENG_PATH']
fra_path = config['FRA_PATH']
bra_path = config['BRA_PATH']
ind_path = config['IND_PATH']
"""
Scraping of Narendra Modi's tweets (India)
"""
def create_india_csv():
c = twint.Config()
username = "narendramodi"
c.Username = username
c.Since = '2020-01-01'
c.Until = '2020-06-29'
c.Limit = 3000
c.Store_csv = True
c.Output = path + "tweets_india_1.csv"
twint.run.Search(c)
c = twint.Config()
username = "narendramodi"
c.Username = username
c.Since = '2020-06-30'
c.Until = '2021-06-09'
c.Limit = 3000
c.Store_csv = True
c.Output = path+"tweets_india_2.csv"
twint.run.Search(c)
    data1 = pd.read_csv(path + "tweets_india_1.csv")
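    # Hedged sketch of the (truncated) remainder: the second CSV is presumably read
    # the same way and the two halves concatenated into the final India dataset, e.g.
    #   data2 = pd.read_csv(path + "tweets_india_2.csv")
    #   pd.concat([data1, data2]).to_csv(ind_path, index=False)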
from numpy.ma import add
import pandas as pd
import numpy as np
np.seterr(divide='ignore')
import scipy.signal as signal
import scipy.stats as stats
import matplotlib.pyplot as plt
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.multitest as multi
from scipy.optimize import curve_fit
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from scipy.stats import percentileofscore
from scipy.stats import circstd, circmean
import copy
import itertools
from matplotlib.lines import Line2D
from random import sample
import os
from skopt.space import Space
from skopt.sampler import Lhs
def periodogram_df(df, folder = '', **kwargs):
names = list(df.test.unique())
names.sort()
for name in names:
x, y = np.array(df[df.test == name].x), np.array(df[df.test == name].y)
if folder:
save_to = os.path.join(folder, "per_" + name)
else:
save_to = ""
periodogram(x,y, save_to = save_to, name=name, **kwargs)
def periodogram(X, Y, per_type='per', sampling_f = '', logscale = False, name = '', save_to = '', prominent = False, max_per = 240):
if per_type == 'per' or per_type == 'welch':
X_u = np.unique(X)
Y_u = []
for x_u in X_u:
#y_u.append(np.mean(y[t == x]))
Y_u.append(np.median(Y[x_u == X]))
if not sampling_f:
sampling_f = 1/(X[1]-X[0])
Y = Y_u
if per_type == 'per':
# Fourier
f, Pxx_den = signal.periodogram(Y,sampling_f)
elif per_type =='welch':
# Welch
f, Pxx_den = signal.welch(Y,sampling_f)
elif per_type == 'lombscargle':
# Lomb-Scargle
min_per = 2
#max_per = 50
f = np.linspace(1/max_per, 1/min_per, 10)
Pxx_den = signal.lombscargle(X, Y, f)
else:
print("Invalid option")
return
# significance
# Refinetti et al. 2007
p_t = 0.05
N = len(Y)
T = (1 - (p_t/N)**(1/(N-1))) * sum(Pxx_den) # threshold for significance
if f[0] == 0:
per = 1/f[1:]
Pxx = Pxx_den[1:]
else:
per = 1/f
Pxx = Pxx_den
Pxx = Pxx[per <= max_per]
per = per[per <= max_per]
try:
if logscale:
plt.semilogx(per, Pxx, 'ko')
plt.semilogx(per, Pxx, 'k--', linewidth=0.5)
plt.semilogx([min(per), max(per)], [T, T], 'k--', linewidth=1)
else:
plt.plot(per, Pxx, 'ko')
plt.plot(per, Pxx, 'k--', linewidth=0.5)
plt.plot([min(per), max(per)], [T, T], 'k--', linewidth=1)
except:
print("Could not plot!")
return
peak_label = ''
if prominent:
locs, heights = signal.find_peaks(Pxx, height = T)
if any(locs):
heights = heights['peak_heights']
s = list(zip(heights, locs))
s.sort(reverse=True)
heights, locs = zip(*s)
heights = np.array(heights)
locs = np.array(locs)
peak_label = ', max peak=' + str(per[locs[0]])
else:
locs = Pxx >= T
if any(locs):
heights, locs = Pxx[locs], per[locs]
HL = list(zip(heights, locs))
HL.sort(reverse = True)
heights, locs = zip(*HL)
peak_label = ', peaks=\n'
locs = locs[:11]
for loc in locs[:-1]:
peak_label += "{:.2f}".format(loc) + ','
peak_label += "{:.2f}".format(locs[-1])
plt.xlabel('period [hours]')
plt.ylabel('PSD')
plt.title(name + peak_label)
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
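# Added usage sketch (not part of the original module): the *_df helpers expect a
# tidy frame with 'test', 'x' and 'y' columns; here on synthetic 24 h data.
def _example_periodogram_usage():
    x = np.arange(0, 96, 2)  # four days sampled every 2 h
    y = 10 + 3*np.cos(2*np.pi*x/24) + np.random.normal(0, 0.5, len(x))
    df = pd.DataFrame({'test': 'synthetic', 'x': x, 'y': y})
    periodogram_df(df)  # one periodogram per unique 'test'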
def remove_lin_comp_df(df, n_components = 0, period = 24, summary_file=""):
df2 = pd.DataFrame(columns=df.columns)
if summary_file:
df_fit = pd.DataFrame(columns=['test', 'k', 'CI', 'p', 'q'])
for test in df.test.unique():
x,y = df[df['test']==test].x,df[df['test']==test].y
x,y,fit = remove_lin_comp(x,y,n_components=n_components, period=period, return_fit=True)
df_tmp = pd.DataFrame(columns=df.columns)
df_tmp['x'] = x
df_tmp['y'] = y
df_tmp['test'] = test
df2 = df2.append(df_tmp, ignore_index=True)
if summary_file:
fit['test'] = test
df_fit=df_fit.append(fit, ignore_index=True)
if summary_file:
df_fit.q = multi.multipletests(df_fit.p, method = 'fdr_bh')[1]
if summary_file.endswith("csv"):
df_fit.to_csv(summary_file, index=False)
elif summary_file.endswith("xlsx"):
df_fit.to_excel(summary_file, index=False)
return df2
def remove_lin_comp(X, Y, n_components = 0, period = 24, return_fit=False):
X = np.array(X)
Y = np.array(Y)
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = True)
model = sm.OLS(Y, X_fit)
results = model.fit()
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
CI = CIs[1]
#A = results.params[0]
k = results.params[1]
"""
X_lin = np.zeros(X_fit.shape)
X_lin[:,1] = X_fit[:,1]
Y_lin = results.predict(X_lin)
Y = Y-Y_lin
"""
#Y_fit = results.predict(X_fir)
#Y = Y - Y_fit
#Y = Y - A - k*X
    if CI[0] * CI[1] > 0: # if both CI bounds have the same sign
Y = Y - k*X
if return_fit:
fit = {}
fit['k'] = results.params[1]
fit['CI'] = CI
fit['p'] = results.pvalues[1]
return X,Y,fit
"""
X_fit = generate_independents(X, n_components = n_components, period = period, lin_comp = False)
model = sm.OLS(Y, X_fit)
results = model.fit()
plt.plot(X, results.fittedvalues, color="black")
"""
return X, Y
# prepare the independent variables
def generate_independents(X, n_components = 3, period = 24, lin_comp = False, remove_lin_comp = False):
if n_components == 0:
X_fit = X
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if lin_comp and n_components:
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
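# Added note: for n_components=2, period=24 and lin_comp=False the returned design
# matrix has columns [const, sin(2*pi*x/24), cos(2*pi*x/24), sin(2*pi*x/12), cos(2*pi*x/12)].
def _example_generate_independents():
    x = np.linspace(0, 48, 5)
    X_fit = generate_independents(x, n_components=2, period=24)
    return X_fit.shape  # (5, 5): a constant plus two (sin, cos) pairs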
# prepare the independent variables for limorhyde
def generate_independents_compare(X1, X2, n_components1 = 3, period1 = 24, n_components2 = 3, period2 = 24, lin_comp = False, non_rhythmic=False, remove_lin_comp=False):
H1 = np.zeros(X1.size)
H2 = np.ones(X2.size)
X = np.concatenate((X1, X2))
H_i = np.concatenate((H1, H2))
X_i = H_i * X
for i in np.arange(n_components1):
n = i+1
A = np.sin((X/(period1/n))*np.pi*2)
B = np.cos((X/(period1/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
if non_rhythmic:
X_fit = np.column_stack((X_fit, H_i))
else:
for i in np.arange(n_components2):
n = i+1
A_i = H_i * np.sin((X/(period2/n))*np.pi*2)
B_i = H_i * np.cos((X/(period2/n))*np.pi*2)
X_fit = np.column_stack((X_fit, np.column_stack((A_i, B_i))))
X_fit = np.column_stack((X_fit, H_i))
if lin_comp:
X_fit = np.column_stack((X_i, X_fit))
X_fit = np.column_stack((X, X_fit))
if remove_lin_comp:
X_fit[:,0] = 0
X_fit[:,1] = 0
X_fit = sm.add_constant(X_fit, has_constant='add')
return X_fit
"""
*****************************
* start of finding the best *
*****************************
"""
def get_best_fits(df_results, criterium = 'R2_adj', reverse = False, n_components = []):
df_best = pd.DataFrame(columns = df_results.columns, dtype=float)
names = np.unique(df_results.test)
for name in names:
if n_components:
for n_comp in n_components:
if reverse:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].min()
else:
M = df_results[(df_results.test == name) & (df_results.n_components == n_comp)][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results.n_components == n_comp) & (df_results[criterium] == M)], ignore_index = True)
else:
M = df_results[df_results.test == name][criterium].max()
df_best = df_best.append(df_results[(df_results.test == name) & (df_results[criterium] == M)], ignore_index = True)
return df_best
def get_best_models_population(df, df_models, n_components = [1,2,3], lin_comp = False, criterium = 'RSS', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, criterium = criterium, reverse = reverse, n_components=n_components)
for test in names:
        n_points = df[df.test.str.startswith(test)].x.shape[0] # this line is the difference between get_best_models and get_best_models_population
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
# compare two models according to the F-test
# http://people.reed.edu/~jones/Courses/P24.pdf
# https://www.graphpad.com/guides/prism/7/curve-fitting/index.htm?reg_howtheftestworks.htm
def get_best_models(df, df_models, n_components = [1,2,3], lin_comp = False, criterium='p', reverse = True):
names = np.unique(df_models.test)
df_best = pd.DataFrame(columns = df_models.columns, dtype=float)
df_models = get_best_fits(df_models, n_components = n_components, criterium=criterium, reverse = reverse)
for test in names:
n_points = df[df.test == test].x.shape[0]
df_test_models = df_models[df_models.test == test]
df_test_models = df_test_models.sort_values(by=['n_components'])
i = 0
for new_row in df_test_models.iterrows():
if i == 0:
best_row = new_row
i = 1
else:
RSS_reduced = best_row[1].RSS
RSS_full = new_row[1].RSS
DF_reduced = n_points - (best_row[1].n_components * 2 + 1)
DF_full = n_points - (new_row[1].n_components * 2 + 1)
if lin_comp:
DF_reduced -= 1
DF_full -= 1
#print (test, old_row[1].n_components, new_row[1].n_components)
if compare_models(RSS_reduced, RSS_full, DF_reduced, DF_full) < 0.05:
best_row = new_row
df_best = df_best.append(best_row[1], ignore_index=True)
return df_best
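# The compare_models helper used above is defined elsewhere in the module; for
# reference, the standard partial F-test it relies on looks roughly like this
# (a sketch, not necessarily the module's exact implementation):
def _compare_models_sketch(RSS_reduced, RSS_full, DF_reduced, DF_full):
    F = ((RSS_reduced - RSS_full)/(DF_reduced - DF_full)) / (RSS_full/DF_full)
    return 1 - stats.f.cdf(F, DF_reduced - DF_full, DF_full)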
"""
***************************
* end of finding the best *
***************************
"""
"""
************
* plotting *
************
"""
def plot_data(df, names = [], folder = '', prefix = '', color='black'):
if not names:
names = np.unique(df.test)
for test in names:
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
plt.plot(X,Y,'o', markersize=1, color=color)
plt.title(test)
#test = test.replace("$","")
#fig = plt.gcf()
#fig.set_size_inches(11,8)
if folder:
plt.savefig(os.path.join(folder, prefix+test+'.png'))
plt.savefig(os.path.join(folder, prefix+test+'.pdf'))
plt.close()
else:
plt.show()
def plot_data_pairs(df, names, folder = '', prefix ='', color1='black', color2='red'):
for test1, test2 in names:
X1, Y1 = np.array(df[df.test == test1].x), np.array(df[df.test == test1].y)
X2, Y2 = np.array(df[df.test == test2].x), np.array(df[df.test == test2].y)
plt.plot(X1,Y1,'o', color=color1, markersize=1, label=test1)
plt.plot(X2,Y2,'o', color=color2, markersize=1, label=test2)
plt.legend()
plt.title(test1 + ' vs. ' + test2)
if folder:
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.png'))
plt.savefig(os.path.join(folder,prefix+test1+'_'+test2+'.pdf'))
plt.close()
else:
plt.show()
def plot_components(X, Y, n_components = 3, period = 24, name = '', save_to = ''):
A = np.sin((X/period)*np.pi*2)
B = np.cos((X/period)*np.pi*2)
C = np.sin((X/(period/2))*np.pi*2)
D = np.cos((X/(period/2))*np.pi*2)
E = np.sin((X/(period/3))*np.pi*2)
F = np.cos((X/(period/3))*np.pi*2)
#G = np.sin((X/(period/4))*np.pi*2)
#H = np.cos((X/(period/4))*np.pi*2)
fig, axs = plt.subplots(n_components, 2, constrained_layout=True)
fig.suptitle(name, fontsize=16)
axs[0,0].plot(A, Y,'.')
axs[0,0].set(xlabel = 'sin((x/'+str(period)+') * 2$\pi$)')
axs[0,1].plot(B, Y,'.')
axs[0,1].set(xlabel = 'cos((x/'+str(period)+') * 2$\pi$)')
if n_components >= 2:
axs[1,0].plot(C, Y,'.')
axs[1,0].set(xlabel = 'sin((x/'+str(period/2)+') * 2$\pi$)')
axs[1,1].plot(D, Y,'.')
axs[1,1].set(xlabel = 'cos((x/'+str(period/2)+') * 2$\pi$)')
    if n_components >= 3:
        axs[2,0].plot(E, Y,'.')
        axs[2,0].set(xlabel = 'sin((x/'+str(period/3)+') * 2$\pi$)')
        axs[2,1].plot(F, Y,'.')
        axs[2,1].set(xlabel = 'cos((x/'+str(period/3)+') * 2$\pi$)')
    if n_components == 4:
        G = np.sin((X/(period/4))*np.pi*2)
        H = np.cos((X/(period/4))*np.pi*2)
        axs[3,0].plot(G, Y,'.')
        axs[3,0].set(xlabel = 'sin((x/'+str(period/4)+') * 2$\pi$)')
        axs[3,1].plot(H, Y,'.')
        axs[3,1].set(xlabel = 'cos((x/'+str(period/4)+') * 2$\pi$)')
for ax in axs.flat:
ax.set(ylabel = 'y')
if save_to:
plt.savefig(save_to+'.pdf')
plt.savefig(save_to+'.png')
plt.close()
else:
plt.show()
def plot_phases(acrs, amps, tests, period=24, colors = ("black", "red", "green", "blue"), folder = "", prefix="", legend=True, CI_acrs = [], CI_amps = [], linestyles = [], title = "", labels = []):#, plot_measurements = False, measurements=None):
acrs = np.array(acrs, dtype = float)
amps = np.array(amps, dtype = float)
if colors and len(colors) < len(tests):
colors += ("black",) * (len(tests)-len(colors))
x = np.arange(0, 2*np.pi, np.pi/4)
x_labels = list(map(lambda i: 'CT ' + str(i) + " ", list((x/(2*np.pi) * period).astype(int))))
x_labels[1::2] = [""]*len(x_labels[1::2])
ampM = np.max(amps)
amps /= ampM
acrs = -acrs
fig = plt.figure()
ax = fig.add_subplot(projection='polar')
ax.set_theta_offset(0.5*np.pi)
ax.set_theta_direction(-1)
lines = []
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
"""
if "LDL" in test:
color = "#FF0000"
elif "HDL" in test:
color = "#0000FF"
elif "CHOL" in test:
color = "#00FF00"
elif "control" in test.lower():
color = "#000000"
else:
color = "#0000FF"
"""
if linestyles:
#ax.plot([acr, acr], [0, amp], label=test, color=color, linestyle = linestyles[i])
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2, linestyle = linestyles[i]) )
lines.append(Line2D([0], [0], color=color, linewidth=2, linestyle=linestyles[i]))
else:
#ax.plot([acr, acr], [0, amp], label=test, color=color)
ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, alpha = 0.75, linewidth=2) )
lines.append(Line2D([0], [0], color=color, linewidth=2))
#ax.plot([acr, acr], [0, amp], label=test, color=color)
#ax.annotate("", xy=(acr, amp), xytext=(0, 0), arrowprops=dict(arrowstyle="->", color=color, linewidth=2) )
if CI_acrs and CI_amps:
amp_l, amp_u = np.array(CI_amps[i])/ampM
amp_l = max(0, amp_l)
amp_u = min(1, amp_u)
acr_l, acr_u = -np.array(CI_acrs[i])
if acr_l - acr_u > 2*np.pi:
plt.fill_between(np.linspace(0, np.pi*2, 1000), amp_l, amp_u, color=color, alpha=0.1)
elif acr_u < acr_l:
acr_l, acr_u = acr_u, acr_l
plt.fill_between(np.linspace(acr_l, acr_u, 1000), amp_l, amp_u, color=color, alpha=0.1)
ax.set_rmax(1)
ax.set_rticks([0.5]) # Less radial ticks
ax.set_yticklabels([""])
ax.set_xticks(x)
ax.set_xticklabels(x_labels)
ax.grid(True)
ax.set_facecolor('#f0f0f0')
"""
for i, (acr, amp, test, color) in enumerate(zip(acrs, amps, tests, colors)):
if plot_measurements:
try:
x,y = measurements
except:
df = measurements
x,y=df[df.test == test].x, df[df.test == test].y
plt.plot(x,y,'o',markersize=1, alpha = 0.75, color=color)
"""
name = "_".join(tests)
#ax.set_title(name, va='bottom')
if title:
ax.set_title(title, va='bottom')
else:
ax.set_title(name, va='bottom')
if legend:
if labels:
plt.legend(lines, labels, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
else:
plt.legend(lines, tests, bbox_to_anchor=(1.0, 1), loc='upper left', borderaxespad=0., frameon=False)
#ax.legend()
if folder:
plt.savefig(os.path.join(folder,prefix+name+"_phase.pdf"))
plt.savefig(os.path.join(folder,prefix+name+"_phase.png"))
plt.close()
else:
plt.show()
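# Added usage sketch: compare the acrophases/amplitudes of two fits on a single
# polar plot (the numbers below are illustrative).
def _example_plot_phases():
    plot_phases([np.pi/3, np.pi/2], [2.0, 1.5], ['groupA', 'groupB'],
                period=24, colors=("black", "red"))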
"""
*******************
* end of plotting *
*******************
"""
"""
*****************************
* start of fitting wrappers *
*****************************
"""
def fit_group(df, n_components = 2, period = 24, names = "", folder = '', prefix='', **kwargs):
df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'R2', 'R2_adj', 'log-likelihood', 'amplitude', 'acrophase', 'mesor', 'peaks', 'heights', 'troughs', 'heights2'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
for test in names:
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
X, Y = np.array(df[df.test == test].x), np.array(df[df.test == test].y)
if folder:
save_to = os.path.join(folder,prefix+test+'_compnts='+str(n_comps) +'_per=' + str(per))
else:
save_to = ''
results, statistics, rhythm_param, _, _ = fit_me(X, Y, n_components = n_comps, period = per, name = test, save_to = save_to, **kwargs)
try:
R2, R2_adj = results.rsquared,results.rsquared_adj
except:
R2, R2_adj = np.nan, np.nan
df_results = df_results.append({'test': test,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'R2': R2,
'R2_adj': R2_adj,
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'log-likelihood': results.llf,
'amplitude': rhythm_param['amplitude'],
'acrophase': rhythm_param['acrophase'],
'mesor': rhythm_param['mesor'],
'peaks': rhythm_param['peaks'],
'heights': rhythm_param['heights'],
'troughs': rhythm_param['troughs'],
'heights2': rhythm_param['heights2']
}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
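# Added workflow sketch: a typical single-measurement analysis fits several component
# counts with fit_group and then keeps the preferred model per test with the nested
# F-test based selection above.
def _example_single_analysis(df):
    df_results = fit_group(df, n_components=[1, 2, 3], period=24, plot=False)
    return get_best_models(df, df_results, n_components=[1, 2, 3])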
def population_fit_group(df, n_components = 2, period = 24, folder = '', prefix='', names = [], **kwargs):
df_results = pd.DataFrame(columns = ['test', 'period', 'n_components', 'p', 'q', 'p_reject', 'q_reject', 'RSS', 'amplitude', 'acrophase', 'mesor'], dtype=float)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
if not any(names):
names = np.unique(df.test)
names = list(set(list(map(lambda x:x.split('_rep')[0], names))))
names.sort()
for name in set(names):
for n_comps in n_components:
for per in period:
if n_comps == 0:
per = 100000
df_pop = df[df.test.str.startswith(name)]
if folder:
save_to=os.path.join(folder,prefix+name+'_compnts='+str(n_comps) +'_per=' + str(per))
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, save_to = save_to, **kwargs)
else:
_, statistics, _, rhythm_params, _ = population_fit(df_pop, n_components = n_comps, period = per, **kwargs)
df_results = df_results.append({'test': name,
'period': per,
'n_components': n_comps,
'p': statistics['p'],
'p_reject': statistics['p_reject'],
'RSS': statistics['RSS'],
'ME': statistics['ME'],
'resid_SE': statistics['resid_SE'],
'amplitude': rhythm_params['amplitude'],
'acrophase': rhythm_params['acrophase'],
'mesor': rhythm_params['mesor']}, ignore_index=True)
if n_comps == 0:
break
df_results.q = multi.multipletests(df_results.p, method = 'fdr_bh')[1]
df_results.q_reject = multi.multipletests(df_results.p_reject, method = 'fdr_bh')[1]
return df_results
"""
***************************
* end of fitting wrappers *
***************************
"""
"""
******************************
* start of fitting functions *
******************************
"""
def population_fit(df_pop, n_components = 2, period = 24, lin_comp= False, model_type = 'lin', plot = True, plot_measurements=True, plot_individuals=True, plot_margins=True, hold = False, save_to = '', x_label='', y_label='', return_individual_params = False, params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type = "LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], color="black", **kwargs):
if return_individual_params:
ind_params = {}
for param in parameters_to_analyse:
ind_params[param] = []
params = -1
tests = df_pop.test.unique()
k = len(tests)
#X_test = np.linspace(0, 2*period, 1000)
#X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_fit_eval_params[:,1] = 0
min_X = np.min(df_pop.x.values)
max_X = np.max(df_pop.x.values)
min_Y = np.min(df_pop.y.values)
max_Y = np.max(df_pop.y.values)
if plot:
if plot_measurements:
X_plot = np.linspace(min(min_X,0), 1.1*max(max_X,period), 1000)
else:
X_plot = np.linspace(0, 1.1*period, 1000)
X_plot_fits = generate_independents(X_plot, n_components = n_components, period = period, lin_comp = lin_comp)
#if lin_comp:
# X_plot_fits[:,1] = 0
"""
min_X = 1000
max_X = 0
min_Y = 1000
max_Y = 0
min_X_test = np.min(X_test)
"""
min_Y_test = 1000
max_Y_test = 0
for test in tests:
x,y = np.array(df_pop[df_pop.test == test].x), np.array(df_pop[df_pop.test == test].y)
"""
min_X = min(min_X, np.min(x))
max_X = max(max_X, np.max(x))
min_Y = min(min_Y, np.min(y))
max_Y = max(max_Y, np.max(y))
"""
results, statistics, rhythm_params, X_test, Y_test, model = fit_me(x, y, n_components = n_components, period = period, plot = False, return_model = True, lin_comp=lin_comp, **kwargs)
X_fit_eval_params = generate_independents(X_test, n_components = n_components, period = period, lin_comp = lin_comp, remove_lin_comp=True)
if lin_comp:
X_fit_eval_params[:,1] = 0
if return_individual_params:
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_ind_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
for param in parameters_to_analyse:
ind_params[param].append(rhythm_ind_params[param])
if (plot and plot_individuals):
#Y_eval_params = results.predict(X_fit_eval_params)
Y_plot_fits = results.predict(X_plot_fits)
if (plot and plot_individuals):
if not hold:
plt.plot(X_plot,Y_plot_fits,color=color, alpha=0.25, label=test)
else:
plt.plot(X_plot,Y_plot_fits,color=color, alpha=0.25)
min_Y_test = min(min_Y_test, np.min(Y_plot_fits))
max_Y_test = max(max_Y_test, np.max(Y_plot_fits))
if plot and plot_measurements:
plt.plot(x,y,'o', color=color, markersize=1)
if type(params) == int:
params = results.params
if plot and plot_margins:
#_, lowers, uppers = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
Y_plot_fits_all = Y_plot_fits
else:
params = np.vstack([params, results.params])
if plot and plot_margins:
#_, l, u = wls_prediction_std(results, exog=X_fit_eval_params, alpha=0.05)
#lowers = np.vstack([lowers, l])
#uppers = np.vstack([uppers, u])
Y_plot_fits_all = np.vstack([Y_plot_fits_all, Y_plot_fits])
# parameter statistics: means, variances, stadndard deviations, confidence intervals, p-values
#http://reliawiki.com/index.php/Multiple_Linear_Regression_Analysis
if k > 1:
means = np.mean(params, axis=0)
        variances = np.sum((params-np.mean(params, axis=0))**2, axis = 0)/(k-1) # np.var(params, axis=0) # the same as var with ddof=k-1
sd = variances**0.5
se = sd/((k-1)**0.5)
T0 = means/se
p_values = 2 * (1 - stats.t.cdf(abs(T0), k-1))
t = abs(stats.t.ppf(0.05/2,df=k-1))
lower_CI = means - ((t*sd)/((k-1)**0.5))
upper_CI = means + ((t*sd)/((k-1)**0.5))
results.initialize(model, means)
else:
means = params
sd = np.zeros(len(params))
sd[:] = np.nan
se = np.zeros(len(params))
se[:] = np.nan
lower_CI = means
upper_CI = means
p_values = np.zeros(len(params))
p_values[:] = np.nan
x,y = df_pop.x, df_pop.y
xy = list(zip(x,y))
xy.sort()
x,y = zip(*xy)
x,y = np.array(x), np.array(y)
X_fit = generate_independents(x, n_components = n_components, period = period, lin_comp = lin_comp)
Y_fit = results.predict(X_fit)
Y_eval_params = results.predict(X_fit_eval_params)
rhythm_params = evaluate_rhythm_params(X_test, Y_eval_params, period=period)
if plot:
pop_name = "_".join(test.split("_")[:-1])
Y_plot_fits = results.predict(X_plot_fits)
if not hold:
plt.plot(X_plot,Y_plot_fits, color=color, label="population fit")
else:
plt.plot(X_plot,Y_plot_fits, color=color, label=pop_name)
plt.legend()
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('time [h]')
if y_label:
plt.ylabel(y_label)
else:
plt.ylabel('measurements')
min_Y_test = min(min_Y_test, np.min(Y_eval_params))
max_Y_test = max(max_Y_test, np.max(Y_eval_params))
if plot and plot_margins:
if k == 1:
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
else:
#lower = np.mean(lowers, axis=0)
#upper = np.mean(uppers, axis=0)
var_Y = np.var(Y_plot_fits_all, axis=0, ddof = k-1)
sd_Y = var_Y**0.5
lower = Y_plot_fits - ((t*sd_Y)/((k-1)**0.5))
upper = Y_plot_fits + ((t*sd_Y)/((k-1)**0.5))
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
if plot:
if plot_measurements:
if model_type == 'lin':
plt.axis([min(min_X,0), 1.1*max(max_X,period), 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([min(min_X,0), max_X, 0.9*min(min_Y, min_Y_test), 1.1*max(max_Y, max_Y_test)])
else:
plt.axis([0, period, min_Y_test*0.9, max_Y_test*1.1])
if plot:
#pop_name = "_".join(test.split("_")[:-1])
if not hold:
plt.title(pop_name + ', p-value=' + "{0:.5f}".format(statistics['p']))
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
statistics = calculate_statistics(x, y, Y_fit, n_components, period, lin_comp)
statistics_params = {'values': means,
'SE': se,
'CI': (lower_CI, upper_CI),
'p-values': p_values}
if params_CI:
population_eval_params_CI(X_test, X_fit_eval_params, results, statistics_params, rhythm_params, samples_per_param=samples_per_param_CI, max_samples = max_samples_CI, k=k, sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_individual_params:
return params, statistics, statistics_params, rhythm_params, results, ind_params
else:
return params, statistics, statistics_params, rhythm_params, results
def fit_me(X, Y, n_components = 2, period = 24, lin_comp = False, model_type = 'lin', alpha = 0, name = '', save_to = '', plot=True, plot_residuals=False, plot_measurements=True, plot_margins=True, return_model = False, color = False, plot_phase = True, hold=False, x_label = "", y_label = "", rescale_to_period=False, bootstrap=False, bootstrap_size=1000, bootstrap_type="std", params_CI = False, samples_per_param_CI=5, max_samples_CI = 1000, sampling_type="LHS", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase']):
#print(lin_comp)
"""
###
# prepare the independent variables
###
"""
"""
if n_components == 0:
X_fit = X
X_fit_test = X_test
lin_comp = True
else:
for i in np.arange(n_components):
n = i+1
A = np.sin((X/(period/n))*np.pi*2)
B = np.cos((X/(period/n))*np.pi*2)
A_test = np.sin((X_test/(period/n))*np.pi*2)
B_test = np.cos((X_test/(period/n))*np.pi*2)
if not i:
X_fit = np.column_stack((A, B))
X_fit_test = np.column_stack((A_test, B_test))
else:
X_fit = np.column_stack((X_fit, np.column_stack((A, B))))
X_fit_test = np.column_stack((X_fit_test, np.column_stack((A_test, B_test))))
"""
X_fit = generate_independents(X, n_components=n_components, period=period, lin_comp=lin_comp)
#X_fit_eval_params = X_fit_test
#if lin_comp and n_components:
# X_fit = np.column_stack((X, X_fit))
# X_fit_eval_params = np.column_stack((np.zeros(len(X_test)), X_fit_test))
# X_fit_test = np.column_stack((X_test, X_fit_test))
#X_fit = sm.add_constant(X_fit, has_constant='add')
#X_fit_test = sm.add_constant(X_fit_test, has_constant='add')
#X_fit_eval_params = sm.add_constant(X_fit_eval_params, has_constant='add')
"""
###
# fit
###
"""
if model_type == 'lin':
model = sm.OLS(Y, X_fit)
results = model.fit()
elif model_type == 'poisson':
#model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
model = statsmodels.discrete.discrete_model.Poisson(Y, X_fit)
results = model.fit(disp=0)
elif model_type =='gen_poisson':
#model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit)
model = statsmodels.discrete.discrete_model.GeneralizedPoisson(Y, X_fit, p=1)
results = model.fit(disp=0)
elif model_type == 'nb':
# https://towardsdatascience.com/negative-binomial-regression-f99031bb25b4
# https://dius.com.au/2017/08/03/using-statsmodels-glms-to-model-beverage-consumption/#cameron
# if not alpha:
# train_model = sm.GLM(Y, X_fit, family=sm.families.Poisson())
# train_results = train_model.fit()
# df_train = pd.DataFrame()
# df_train['Y'] = Y
# df_train['mu'] = train_results.mu
# df_train['AUX_OLS_DEP'] = df_train.apply(lambda x: ((x['Y'] - x['mu'])**2 - x['Y']) / x['mu'], axis=1)
# ols_expr = """AUX_OLS_DEP ~ mu - 1"""
# aux_olsr_results = smf.ols(ols_expr, df_train).fit()
# alpha=aux_olsr_results.params[0]
#model = sm.GLM(Y, X_fit, family=sm.families.NegativeBinomial(alpha=alpha))
model = statsmodels.discrete.discrete_model.NegativeBinomialP(Y, X_fit, p=1)
results = model.fit(disp=0)
else:
print("Invalid option")
return
if model_type =='lin':
Y_fit = results.fittedvalues
else:
Y_fit = results.predict(X_fit)
if model_type in ['lin', 'poisson', 'nb']:
statistics = calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp)
if model_type in ['poisson', 'nb']:
statistics['count'] = np.sum(Y)
else:
RSS = sum((Y - Y_fit)**2)
p = results.llr_pvalue
statistics = {'p':p, 'RSS':RSS, 'count': np.sum(Y)}
#Y_test = results.predict(X_fit_test)
X_test = np.linspace(0, 2*period, 1000)
X_fit_test = generate_independents(X_test, n_components=n_components, period=period, lin_comp=lin_comp, remove_lin_comp = True)
Y_fit_test = results.predict(X_fit_test)
rhythm_params = evaluate_rhythm_params(X_test, Y_fit_test, period=period)
if lin_comp:
rhythm_params['lin_comp'] = results.params[1]
CIs = results.conf_int()
if type(CIs) != np.ndarray:
rhythm_params['CI(lin_comp)'] = CIs.values[1]
else:
rhythm_params['CI(lin_comp)'] = CIs[1]
rhythm_params['p(lin_comp)'] = results.pvalues[1]
#print(rhythm_params['p(lin_comp)'])
"""
###
# plot
###
"""
if plot:
if plot_measurements:
min_X = np.min(X)
max_X = np.max(X)
else:
min_X = 0
max_X = period
X_plot = np.linspace(min_X, max_X, 1000)
X_plot_fits = generate_independents(X_plot, n_components=n_components, period=period, lin_comp=lin_comp)
Y_plot = results.predict(X_plot_fits)
###
if not color:
color = 'black'
if plot_measurements:
if not hold:
plt.plot(X,Y, 'ko', markersize=1, label = 'data', color=color)
else:
plt.plot(X,Y, 'ko', markersize=1, color=color)
if not hold:
plt.plot(X_plot, Y_plot, 'k', label = 'fit', color=color)
else:
plt.plot(X_plot, Y_plot, 'k', label = name, color=color)
# plot measurements
if plot_measurements:
if rescale_to_period:
X = X % period
if model_type == 'lin':
plt.axis([min_X, max_X, 0.9*min(min(Y), min(Y_plot)), 1.1*max(max(Y), max(Y_plot))])
else:
plt.axis([min_X, max_X, 0.9*min(min(Y), min(Y_plot)), 1.1*max(max(Y), max(Y_plot))])
else:
plt.axis([min_X, max_X, min(Y_plot)*0.9, max(Y_plot)*1.1])
if name:
plt.title(name)
"""
if model_type == 'lin':
if name:
plt.title(name + ', p-value=' + "{0:.5f}".format(statistics['p']))
else:
plt.title('p-value=' + "{0:.5f}".format(statistics['p']))
else:
if name:
plt.title(name + ', p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
else:
plt.title('p-value=' + '{0:.3f}'.format(statistics['p']) + ' (n='+str(statistics['count'])+ ')')
"""
if x_label:
plt.xlabel(x_label)
else:
plt.xlabel('Time [h]')
if y_label:
plt.ylabel(y_label)
elif model_type == 'lin':
plt.ylabel('Measurements')
else:
plt.ylabel('Count')
# plot confidence intervals
if plot_margins:
if model_type == 'lin':
_, lower, upper = wls_prediction_std(results, exog=X_plot_fits, alpha=0.05)
if color:
plt.fill_between(X_plot, lower, upper, color=color, alpha=0.1)
else:
plt.fill_between(X_plot, lower, upper, color='#888888', alpha=0.1)
else:
# calculate and draw plots from the combinations of parameters from the 95 % confidence intervals of assessed parameters
res2 = copy.deepcopy(results)
params = res2.params
CIs = results.conf_int()
if type(CIs) != np.ndarray:
CIs = CIs.values
#N = 512
N = 1024
if n_components == 1:
N2 = 10
elif n_components == 2:
N2 = 8
else:
N2 = 10 - n_components
P = np.zeros((len(params), N2))
for i, CI in enumerate(CIs):
P[i,:] = np.linspace(CI[0], CI[1], N2)
n_param_samples = P.shape[1]**P.shape[0]
N = n_param_samples #min(max_samples_CI, n_param_samples)
if n_param_samples < 10**6:
params_samples = np.random.choice(n_param_samples, size=N, replace=False)
else:
params_samples = my_random_choice(max_val=n_param_samples, size=N)
for i,idx in enumerate(params_samples):
p = lazy_prod(idx, P)
res2.initialize(results.model, p)
Y_test_CI = res2.predict(X_plot_fits)
if plot and plot_margins:
if color and color != '#000000':
plt.plot(X_plot, Y_test_CI, color=color, alpha=0.05)
else:
plt.plot(X_plot, Y_test_CI, color='#888888', alpha=0.05)
if not hold:
if save_to:
plt.savefig(save_to+'.png')
plt.savefig(save_to+'.pdf')
plt.close()
else:
plt.show()
if plot_residuals:
resid = results.resid
sm.qqplot(resid)
plt.title(name)
if save_to:
plt.savefig(save_to+'_resid.pdf', bbox_inches='tight')
plt.savefig(save_to+'_resid.png')
plt.close()
else:
plt.show()
if plot_phase:
per = rhythm_params['period']
amp = rhythm_params['amplitude']
phase = rhythm_params['acrophase']
if save_to:
folder = os.path.join(*os.path.split(save_to)[:-1])
plot_phases([phase], [amp], [name], period=per, folder=folder)
else:
plot_phases([phase], [amp], [name], period=per)#, plot_measurements=True, measurements=[X,Y])
if bootstrap:
eval_params_bootstrap(X, X_fit, X_test, X_fit_test, Y, model_type = model_type, rhythm_params=rhythm_params, bootstrap_size=bootstrap_size, bootstrap_type=bootstrap_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if params_CI:
eval_params_CI(X_test, X_fit_test, results, rhythm_params, samples_per_param = samples_per_param_CI, max_samples = max_samples_CI, k=len(X), sampling_type=sampling_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, period=period)
if return_model:
return results, statistics, rhythm_params, X_test, Y_fit_test, model
else:
return results, statistics, rhythm_params, X_test, Y_fit_test
"""
****************************
* end of fitting functions *
****************************
"""
"""
***********************
* start of assessment *
***********************
"""
# rhythm params
def evaluate_rhythm_params(X,Y, project_acrophase=True, period=0):
#plt.plot(X,Y)
#plt.show()
m = min(Y)
M = max(Y)
A = M - m
MESOR = m + A/2
AMPLITUDE = abs(A/2)
PHASE = 0
PHASE_LOC = 0
H = M - 0.01*M if M >= 0 else M + 0.01*M
locs, heights = signal.find_peaks(Y, height = H)
heights = heights['peak_heights']
if len(locs) >= 2:
period2 = X[locs[1]] - X[locs[0]]
period2 = int(round(period2))
else:
period2 = np.nan
if not period:
period = period2
if len(locs) >= 1:
PHASE = X[locs[0]]
PHASE_LOC = locs[0]
if period:
ACROPHASE = phase_to_radians(PHASE, period)
if project_acrophase:
ACROPHASE = project_acr(ACROPHASE)
else:
ACROPHASE = np.nan
# peaks and heights
#Y = Y[X < 24]
#X = X[X < 24]
locs, heights = signal.find_peaks(Y, height = MESOR)
heights = heights['peak_heights']
peaks = X[locs]
heights = Y[locs]
idxs1 = peaks <= period
peaks = peaks[idxs1]
heights = heights[idxs1]
Y2 = M - Y
locs2, heights2 = signal.find_peaks(Y2, height = MESOR-m)
heights2 = heights2['peak_heights']
troughs = X[locs2]
heights2 = Y[locs2]
idxs2 = troughs <= period
troughs = troughs[idxs2]
heights2 = heights2[idxs2]
# rhythm_params
return {'period':period, 'amplitude':AMPLITUDE, 'acrophase':ACROPHASE, 'mesor':MESOR, 'peaks': peaks, 'heights': heights, 'troughs': troughs, 'heights2': heights2, 'max_loc': PHASE_LOC, 'period2':period2}
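# Added sketch: on a clean cosine the helper recovers the generating parameters
# (amplitude 3, MESOR 10, acrophase corresponding to the peak at t = 6 h).
def _example_evaluate_rhythm_params():
    x = np.linspace(0, 48, 1000)
    y = 10 + 3*np.cos(2*np.pi*(x - 6)/24)
    return evaluate_rhythm_params(x, y, period=24)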
def calculate_statistics(X, Y, Y_fit, n_components, period, lin_comp = False):
# statistics according to Cornelissen (eqs (8) - (9))
MSS = sum((Y_fit - Y.mean())**2)
RSS = sum((Y - Y_fit)**2)
n_params = n_components * 2 + 1
if lin_comp:
n_params += 1
N = Y.size
F = (MSS/(n_params - 1)) / (RSS/(N - n_params))
p = 1 - stats.f.cdf(F, n_params - 1, N - n_params)
#print("p-value(Cornelissen): {}".format(p))
# statistics of GOF according to Cornelissen (eqs (14) - (15))
    # TODO: would this need to be corrected for lumicycle data - when there are several consecutive measurements at a single time point?
#X_periodic = (X % period).astype(int)
X_periodic = np.round_(X % period,2)
X_unique = np.unique(X_periodic)
n_T = len(X_unique)
SSPE = 0
for x in X_unique:
Y_i_avg = np.mean(Y[X_periodic == x])
SSPE += sum((Y[X_periodic == x] - Y_i_avg)**2)
SSLOF = RSS-SSPE
#print('RSS: ', RSS)
#print('SSPE: ', SSPE)
#print('SSLOF: ', SSLOF)
if lin_comp:
try:
F = (SSLOF/(n_T-1-(2*n_components + 1)))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-(2*n_components + 1), N-n_T)
except:
F = np.nan
p_reject = np.nan
else:
try:
F = (SSLOF/(n_T-1-2*n_components))/(SSPE/(N-n_T))
p_reject = 1 - stats.f.cdf(F, n_T-1-2*n_components, N-n_T)
except:
F = np.nan
p_reject = np.nan
    # Another measure that describes goodness of fit
# How well does the curve describe the data?
# signal to noise ratio
# fitted curve: signal
# noise:
stdev_data = np.std(Y, ddof = 1)
stdev_fit = np.std(Y_fit, ddof = 1)
SNR = stdev_fit / stdev_data
# Standard Error of residuals, margin of error
# https://stats.stackexchange.com/questions/57746/what-is-residual-standard-error
DoF = N - n_params
resid_SE = np.sqrt(RSS/DoF)
# https://scientificallysound.org/2017/05/16/independent-t-test-python/
# https://www.statisticshowto.datasciencecentral.com/probability-and-statistics/hypothesis-testing/margin-of-error/
critical_value = stats.t.ppf(1-0.025, DoF)
ME = critical_value * resid_SE
return {'p':p, 'p_reject':p_reject, 'SNR':SNR, 'RSS': RSS, 'resid_SE': resid_SE, 'ME': ME}
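# Illustrative sketch (not part of the original module): scoring a synthetic
# single-component fit with calculate_statistics. Two replicates per time point
# are used so the lack-of-fit decomposition above is well defined; the noise
# level is an arbitrary assumption.
def _example_calculate_statistics():
    X_demo = np.tile(np.arange(0, 24, 2), 2)                  # two replicates per time point
    Y_fit_demo = 10 + 3 * np.cos(2 * np.pi * X_demo / 24)     # "fitted" curve
    Y_demo = Y_fit_demo + np.random.randn(X_demo.size) * 0.1  # noisy measurements
    return calculate_statistics(X_demo, Y_demo, Y_fit_demo, n_components=1, period=24)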
"""
*********************
* end of assessment *
*********************
"""
"""
*****************************
* start of compare wrappers *
*****************************
"""
# compare pairs using a given number of components and period
# analysis - options (from best to worst) (ADDITIONAL ANALYSIS)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_limo(df, pairs, n_components = 3, period = 24, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
if folder:
save_to = os.path.join(folder,prefix + test1 + '-' + test2 + '_per=' + str(per) + '_comps=' + str(n_comps))
else:
save_to = ''
#pvalues, params, results = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, lin_comp = lin_comp, model_type = model_type, alpha=alpha, save_to = save_to, plot_measurements=plot_measurements)
#p_overall, pvalues, params, _ = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, **kwargs)
p_overall, p_params, p_F, _, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_comps, period = per, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
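# Illustrative usage sketch: driving the pairwise comparison above. The group
# names and the long-format layout of df (columns 'test', 'x', 'y', as used
# elsewhere in this module) are assumptions for the example only.
def _example_compare_pairs_limo(df):
    pairs = [('control', 'treatment')]
    return compare_pairs_limo(df, pairs, n_components=[1, 2], period=24, analysis="CI1")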
# compare pairs using the best models as stored in df_best_models
# Basic analysis: first analysis according to LimoRhyde (Singer:2019), extended with the extra sum-of-squares F test that compares two nested models.
# compare pairs with the presumption that the same model is used in both cases
# the same model: the same period and the same number of cosinor components
#
# analysis - options (from best to worst)
# - bootstrap1: independent bootstrap analysis
# - CI1: independent analysis of confidence intervals of two models
# - bootstrap2: bootstrap analysis of a merged model
# - CI2: analysis of confidence intervals of a merged model
def compare_pairs_best_models_limo(df, df_best_models, pairs, folder = "", prefix = "", analysis = "", parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if analysis not in ("", "CI1", "bootstrap1", "CI2", "bootstrap2"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p', 'q', 'p params', 'q params', 'p(F test)', 'q(F test)']
if analysis:
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
# if models have different number of components always start with the simpler model
# model is simpler if number of components is smaller
if n_components1 > n_components2:
test1, test2 = test2, test1
n_components1, n_components2 = n_components2, n_components1
period1, period2 = period2, period1
if folder:
            save_to = os.path.join(folder, prefix + test1 + '-' + test2 + '_per1=' + str(period1) + '_comps1=' + str(n_components1) + '_per2=' + str(period2) + '_comps2=' + str(n_components2))
else:
save_to = ''
p_overall, p_params, p_F, params, _, rhythm_params = compare_pair_df_extended(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, save_to = save_to, additional_analysis = analysis, parameters_to_analyse=parameters_to_analyse, parameters_angular=parameters_angular, **kwargs)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
d['d_amplitude'] = rhythm_params['d_amplitude']
d['d_acrophase'] = rhythm_params['d_acrophase']
d['p'] = p_overall
d['p params'] = p_params
d['p(F test)'] = p_F
if analysis:
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
#d['CI(d_amplitude)'] = rhythm_params['CI(d_amplitude)']
#d['p(d_amplitude)'] = rhythm_params['p(d_amplitude)']
#d['CI(d_acrophase)'] = rhythm_params['CI(d_acrophase)']
#d['p(d_acrophase)'] = rhythm_params['p(d_acrophase)']
df_results['q'] = multi.multipletests(df_results['p'], method = 'fdr_bh')[1]
df_results['q params'] = multi.multipletests(df_results['p params'], method = 'fdr_bh')[1]
df_results['q(F test)'] = multi.multipletests(df_results['p(F test)'], method = 'fdr_bh')[1]
if analysis:
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using a given number of components and period
# analysis - options (from best to worst)
# - bootstrap: independent bootstrap analysis
# - CI: independent analysis of confidence intervals of two models
# To speed things up, pass df_results_extended, which holds the confidence intervals of amplitude and acrophase for every analysed model (the output of cosinor.analyse_models).
def diff_p_t_test_from_CI(X1, X2, CI1, CI2, DoF, angular = False):
dX = X2 - X1
if angular:
dX = project_acr(dX)
t = abs(stats.t.ppf(0.05/2,df=DoF))
dev1 = (CI1[1] - CI1[0])/2
dev2 = (CI2[1] - CI2[0])/2
if angular:
dev1 = abs(project_acr(dev1))
dev2 = abs(project_acr(dev2))
else:
dev1 = abs(dev1)
dev2 = abs(dev2)
dev = dev1+dev2
se = (dev1 + dev2)/t
CI = [dX-dev, dX+dev]
T0 = dX/se
p_val = 2 * (1 - stats.t.cdf(abs(T0), DoF))
return dX, p_val, CI
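# Worked sketch (illustrative only): turning two independent estimates and their
# confidence intervals into a difference test, e.g. amplitudes of 3.0 and 4.2
# with +/-0.5 intervals and 40 residual degrees of freedom.
def _example_diff_from_CI():
    d_amp, p_val, CI = diff_p_t_test_from_CI(3.0, 4.2, [2.5, 3.5], [3.7, 4.7], DoF=40)
    return d_amp, p_val, CI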
def compare_pairs(df, pairs, n_components = 3, period = 24, analysis = "bootstrap", df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp = False, **kwargs):
if (analysis != "CI") and (analysis != "bootstrap"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
if analysis == "CI":
rhythm_params = compare_pair_CI(df, test1, test2, n_components = n_comps, period = per, single_params=single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
elif analysis == "bootstrap":
rhythm_params = compare_pair_bootstrap(df, test1, test2, n_components = n_comps, period = per, single_params=single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
d['p1'] = rhythm_params['statistics1']['p']
d['p2'] = rhythm_params['statistics2']['p']
if lin_comp:
rp1 = rhythm_params['rhythm_params1']
rp2 = rhythm_params['rhythm_params2']
d['d_lin_comp'], d['p(d_lin_comp)'], d['CI(d_lin_comp)'] = diff_p_t_test_from_CI(rp1['lin_comp'], rp2['lin_comp'], rp1['CI(lin_comp)'], rp2['CI(lin_comp)'], rhythm_params['DoF'])
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
if lin_comp:
param = "lin_comp"
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
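# Illustrative sketch: reusing precomputed single-model confidence intervals.
# df_results_extended is assumed to be the output of cosinor.analyse_models for
# the same tests, periods and component counts; passing it avoids re-deriving
# the per-model intervals inside each pairwise comparison.
def _example_compare_pairs_precomputed(df, df_results_extended):
    return compare_pairs(df, [('wt', 'ko')], n_components=2, period=24,
                         analysis="CI", df_results_extended=df_results_extended)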
# compare pairs using the best models as stored in df_best_models
# each member of a pair uses its own model
# analysis - options (from best to worst)
# - bootstrap: independent bootstrap analysis
# - CI: independent analysis of confidence intervals of two models
# To speed things up, pass df_results_extended, which holds the confidence intervals of amplitude and acrophase for every analysed model (the output of cosinor.analyse_best_models).
def compare_pairs_best_models(df, df_best_models, pairs, analysis = "bootstrap", df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], lin_comp=False, **kwargs):
if (analysis != "CI") and (analysis != "bootstrap"):
print("Invalid option")
return
columns = ['test', 'period1', 'n_components1', 'period2', 'n_components2', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
df_results = pd.DataFrame(columns = columns)
for test1, test2 in pairs:
model1 = df_best_models[df_best_models["test"] == test1].iloc[0]
model2 = df_best_models[df_best_models["test"] == test2].iloc[0]
n_components1 = model1.n_components
n_components2 = model2.n_components
period1 = model1.period
period2 = model2.period
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period1'] = period1
d['n_components1'] = n_components1
d['period2'] = period2
d['n_components2'] = n_components2
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_components1) & (df_results_extended['period'] == period1)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_components2) & (df_results_extended['period'] == period2)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
if analysis == "CI":
rhythm_params = compare_pair_CI(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
elif analysis == "bootstrap":
rhythm_params = compare_pair_bootstrap(df, test1, test2, n_components = n_components1, period = period1, n_components2 = n_components2, period2 = period2, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, lin_comp = lin_comp, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
d['p1'] = rhythm_params['statistics1']['p']
d['p2'] = rhythm_params['statistics2']['p']
if lin_comp:
rp1 = rhythm_params['rhythm_params1']
rp2 = rhythm_params['rhythm_params2']
d['d_lin_comp'], d['p(d_lin_comp)'], d['CI(d_lin_comp)'] = diff_p_t_test_from_CI(rp1['lin_comp'], rp2['lin_comp'], rp1['CI(lin_comp)'], rp2['CI(lin_comp)'], rhythm_params['DoF'])
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
if lin_comp:
param = "lin_comp"
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
# compare pairs using a given number of components and period
# analysis - options (from best to worst)
# - CI: independent analysis of confidence intervals of two models
# - permutation: permutation/randomisation test
# To speed things up, pass df_results_extended, which holds the confidence intervals of amplitude and acrophase for every analysed model (the output of cosinor.analyse_models_population).
def compare_pairs_population(df, pairs, n_components = 3, period = 24, folder = "", prefix = "", analysis = "CI", lin_comp= False, model_type = 'lin', df_results_extended = pd.DataFrame(columns=["test"]), parameters_to_analyse = ['amplitude', 'acrophase', 'mesor'], parameters_angular = ['acrophase'], **kwargs):
if (analysis != "CI") and (analysis != "permutation"):
print("Invalid option")
return
columns = ['test', 'period', 'n_components', 'p1', 'p2', 'q1', 'q2']
for param in parameters_to_analyse:
#if param not in ("amplitude", "acrophase"): # these two are already included
columns += [f'd_{param}']
if analysis == "CI":
columns += [f'CI(d_{param})', f'p(d_{param})', f'q(d_{param})']
else:
columns += [f'p(d_{param})', f'q(d_{param})'] # permutation test does not assess the confidence intervals
df_results = pd.DataFrame(columns = columns)
if type(period) == int:
period = [period]
if type(n_components) == int:
n_components = [n_components]
for test1, test2 in pairs:
for per in period:
for n_comps in n_components:
df_pop1 = df[df.test.str.startswith(test1)]
df_pop2 = df[df.test.str.startswith(test2)]
_, statistics1, _, rhythm_params1, _ = population_fit(df_pop1, n_components = n_comps, period = per, plot = False, lin_comp = lin_comp, model_type = model_type)
_, statistics2, _, rhythm_params2, _ = population_fit(df_pop2, n_components = n_comps, period = per, plot = False, lin_comp = lin_comp, model_type = model_type)
d = {}
d['test'] = test1 + ' vs. ' + test2
d['period'] = per
d['n_components'] = n_comps
d['d_amplitude'] = rhythm_params2['amplitude'] - rhythm_params1['amplitude']
d['d_acrophase'] = project_acr(rhythm_params2['acrophase'] - rhythm_params1['acrophase'])
d['p1'] = statistics1['p']
d['p2'] = statistics2['p']
if analysis == "CI":
single_params = {}
if (test1 in list(df_results_extended['test'])) and (test2 in list(df_results_extended['test'])):
try:
res1 = dict(df_results_extended[(df_results_extended['test'] == test1) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
res2 = dict(df_results_extended[(df_results_extended['test'] == test2) & (df_results_extended['n_components'] == n_comps) & (df_results_extended['period'] == per)].iloc[0])
single_params["test1"] = {}
single_params["test2"] = {}
for param in parameters_to_analyse:
single_params["test1"][f'CI({param})'] = res1[f'CI({param})']
single_params["test2"][f'CI({param})'] = res2[f'CI({param})']
except:
pass
rhythm_params = compare_pair_population_CI(df, test1, test2, n_components=n_comps, period=per, lin_comp = lin_comp, model_type = model_type, single_params = single_params, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
elif analysis == "permutation":
rhythm_params = permutation_test_population_approx(df, [(test1,test2)], n_components=n_comps, period=per, plot=False, lin_comp = lin_comp, model_type = model_type, parameters_to_analyse = parameters_to_analyse, parameters_angular = parameters_angular, **kwargs)
for param in parameters_to_analyse:
d[f'd_{param}'] = rhythm_params[f'd_{param}']
#d[f'CI(d_{param})'] = rhythm_params[f'CI(d_{param})']
d[f'p(d_{param})'] = rhythm_params[f'p(d_{param})']
d[f'q(d_{param})'] = np.nan
df_results = df_results.append(d, ignore_index=True)
df_results['q1'] = multi.multipletests(df_results['p1'], method = 'fdr_bh')[1]
df_results['q2'] = multi.multipletests(df_results['p2'], method = 'fdr_bh')[1]
for param in parameters_to_analyse:
df_results[f'q(d_{param})'] = multi.multipletests(df_results[f'p(d_{param})'], method = 'fdr_bh')[1]
return df_results
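# Illustrative sketch: population-level comparison with the permutation option.
# It assumes replicates are named with a shared prefix (e.g. 'wt_rep1',
# 'wt_rep2'), which the startswith() filtering above relies on; the permutation
# branch reports p-values but no confidence intervals.
def _example_compare_pairs_population(df):
    return compare_pairs_population(df, [('wt', 'ko')], n_components=2,
                                    period=24, analysis="permutation")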
# compare pairs using the best models as stored in best_models
# analysis - options (from best to worst)
# - CI: independent analysis of confidence intervals of two models
# - permutation: permutation/randomisation test
# To speed things up, pass df_results_extended, which holds the confidence intervals of amplitude and acrophase for every analysed model (the output of cosinor.analyse_best_models_population).
def compare_pairs_best_models_population(df, df_best_models, pairs, folder = "", prefix = "", analysis = "CI", df_results_extended = | pd.DataFrame(columns=["test"]) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 14:26:30 2020
@author: <NAME>
Program: This is a scraper to pull misdemeanor data from Travis County Clerk's
office website, and shows two methods of pulling the data from the websites.
It pulls three types of web pages:
1) The summary of defendants by date,
2) the defendant's details, and
3) the defendant's disposition details.
Finally, the program outputs the three datasets to csv files.
"""
import requests
import lxml.html as lh
import time
from bs4 import BeautifulSoup
#import numpy as np
import pandas as pd
from datetime import timedelta, date
# Lets us loop through dates
def daterange(start_date, end_date):
for n in range(int((end_date - start_date).days)):
yield start_date + timedelta(n)
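# Illustrative sketch: daterange yields every day in [start_date, end_date);
# the scraping loop below formats each one as YYYY-MM-DD for the query URL.
def _example_daterange():
    return [d.strftime("%Y-%m-%d") for d in daterange(date(2016, 1, 1), date(2016, 1, 4))]
    # -> ['2016-01-01', '2016-01-02', '2016-01-03']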
###############################################################################
# Input filepaths
###############################################################################
main_file = 'misdemeanors_main.csv'
detail_file = 'misdemeanors_detail.csv'
disp_file = 'misdemeanors_disp.csv'
###############################################################################
# Gets the defendants' summaries
###############################################################################
start_date = date(2016, 1, 1)
end_date = date(2019, 1, 1)
main_df = pd.DataFrame()
for single_date in daterange(start_date, end_date):
date_str = single_date.strftime("%Y-%m-%d")
print(date_str)
URL_sum = 'https://countyclerk.traviscountytx.gov/component/chronoconnectivity6/?cont=manager&conn=misdemeanor-data&calendar_misdemeanor_start='+ date_str+'&calendar_misdemeanor_end='+date_str+'&event=index'
#Create a handle, page, to handle the contents of the website
page = requests.get(URL_sum)
#Store the contents of the website under doc
doc = lh.fromstring(page.content)
#Parse data that are stored between <tr>..</tr> of HTML
tr_elements = doc.xpath('//tr')
print([len(T) for T in tr_elements[:12]])
#Create empty list
col=[]
i=0
    # For each cell in the header row, store its text and an empty list for that column's data
for t in tr_elements[2]:
i+=1
header=t.text_content()
print(header)
col.append((header,[]))
    # The first two rows are not part of the table and the third row (index 2) is the header,
    # so the data rows start at index 3
for j in range(3,len(tr_elements)):
#T is our j'th row
T=tr_elements[j]
#If row is not of size 8, the //tr data is not from our table
if len(T)!=8:
break
#i is the index of our column
i=0
#Iterate through each element of the row
for t in T.iterchildren():
data=t.text_content()
            # Skip numeric conversion for the first column; try it for the rest
if i>0:
#Convert any numerical value to integers
try:
data=int(data)
except:
pass
#Append the data to the empty list of the i'th column
col[i][1].append(data)
#Increment i for the next column
i+=1
[len(C) for (title,C) in col]
Dict={title:column for (title,column) in col}
df=pd.DataFrame(Dict)
# append to main dataframe
main_df = main_df.append(df)
# hopefully prevents us from looking like spam
time.sleep(1)
main_df.rename(columns=lambda x: x.strip(), inplace = True)
main_df.rename(columns={ main_df.columns[0]: "Cause No" }, inplace = True)
for column in main_df:
print(column)
main_df[column] = main_df[column].astype(str).map(lambda x: x.strip('\n'))
main_df[column] = main_df[column].astype(str).map(lambda x: x.strip('\t'))
# save to csv
main_df.to_csv(main_file, index=False)
###############################################################################
# Gets defendant details
# The idea is to loop through the Cause Nos pulled from the scrape of the
# defendants' summaries by date.
###############################################################################
main_df = pd.read_csv(main_file)
# initialize blank lists to add data to data frame later
cause_list = []
id_list = []
name_list = []
race_list = []
gender_list = []
ethnicity_list = []
attorney_list = []
court_list = []
for index, row in main_df.iterrows():
cause = row['Cause No']
URL_details = 'https://countyclerk.traviscountytx.gov/component/chronoconnectivity6/?cont=manager&conn=misdemeanor-data&event=view&cause_number='+cause
response_details = requests.get(URL_details)
soup_details = BeautifulSoup(response_details.text, "html.parser")
def_details = soup_details.findAll('td')
# This can pull the headers for the data
header = soup_details.findAll(class_ = 'ui header')
    # if the def_details block is the expected one, then proceed
if(str(def_details[1]) == str('<td class="collapsing right aligned"><h4 class="ui header">Cause No</h4></td>')):
cause_list.append(def_details[2])
else:
cause_list.append('')
if(str(def_details[3]) == str('<td class="collapsing right aligned"><h4 class="ui header">Participant ID</h4></td>')):
id_list.append(def_details[4])
else:
id_list.append('')
if(str(def_details[5]) == str('<td class="collapsing right aligned"><h4 class="ui header">Full Name</h4></td>')):
name_list.append(def_details[6])
else:
name_list.append('')
if(str(def_details[7]) == str('<td class="collapsing right aligned"><h4 class="ui header">Race</h4></td>')):
race_list.append(def_details[8])
else:
race_list.append('')
if(str(def_details[9]) == str('<td class="collapsing right aligned"><h4 class="ui header">Gender</h4></td>')):
gender_list.append(def_details[10])
else:
gender_list.append('')
if(str(def_details[11]) == str('<td class="collapsing right aligned"><h4 class="ui header">Ethnicity</h4></td>')):
ethnicity_list.append(def_details[12])
else:
ethnicity_list.append('')
if(str(def_details[13]) == str('<td class="collapsing right aligned"><h4 class="ui header">Attorney Name</h4></td>')):
attorney_list.append(def_details[14])
else:
attorney_list.append('')
if(str(def_details[15]) == str('<td class="collapsing right aligned"><h4 class="ui header">Court Assignment</h4></td>')):
court_list.append(def_details[16])
else:
court_list.append('')
print(cause)
df_detail = | pd.DataFrame(columns = ['Cause No', 'Participant ID', 'Full Name', 'Race', 'Gender', 'Ethnicity', 'Attorney Name', 'Court Assignment']) | pandas.DataFrame |
import pickle
import numpy as np
import pandas as pd
import time
from model.helper_functions import stub_withhold_split, val_test_features
start_time = time.time(), time.ctime()
print(f'Start time: {start_time[1]}')
# Build df of playlists to classify in clusters
val_pids = np.genfromtxt('../data/val_pids.csv', skip_header=1, dtype=int)
# Import data to memory so it is not loaded from disk for every loop iteration
playlist_df = pd.read_csv('../data/playlists.csv')
track_df = | pd.read_csv('../data/songs_100000_feat_cleaned.csv', index_col='track_uri') | pandas.read_csv |
# Import the file and display the first rows:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df=pd.read_csv('D:\projet_energie.csv', sep=';', low_memory=False)
df.head()
# Create "Date_datetime" in date format:
from datetime import datetime as dt
df["Date_datetime"]=pd.to_datetime(df.Date)
df.head()
# Convert "Heure" to hour format
from datetime import datetime as dt
df["Heure"]=df.Heure.astype("datetime64")
df["Heure"]=df.Heure.dt.hour
df.head()
# Display the dataset columns
df.columns
# View column type information
df.dtypes
# Drop the "Nature" variable
df=df.drop("Nature", axis=1)
df.head()
# Create "Date_datetime" in date format:
from datetime import datetime as dt
df["Date_datetime"]= | pd.to_datetime(df.Date) | pandas.to_datetime |
import carla
from carla import ColorConverter as cc
from ROAR_Sim.carla_client.util.sensors import CollisionSensor
from ROAR_Sim.configurations.configuration import Configuration as CarlaConfig
import logging
import pygame
from ROAR.utilities_module.data_structures_models import SensorsData
from ROAR.utilities_module.vehicle_models import Vehicle
from typing import Tuple
from Bridges.carla_bridge import CarlaBridge
from ROAR_Sim.carla_client.util.hud import HUD
from ROAR_Sim.carla_client.util.world import World
from ROAR_Sim.carla_client.util.keyboard_control import KeyboardControl
from ROAR.configurations.configuration import Configuration as AgentConfig
from ROAR_Sim.configurations.configuration import import_carla
from pathlib import Path
from typing import List, Dict, Any
from ROAR.utilities_module.vehicle_models import VehicleControl
import json
from typing import Optional
import numpy as np
import cv2
from threading import Thread
from datetime import datetime
import time
import pandas as pd
from pandas import ExcelWriter
from datetime import datetime
class CarlaRunner:
def __init__(self,
carla_settings: CarlaConfig,
agent_settings: AgentConfig,
npc_agent_class,
competition_mode=False,
start_bbox: np.ndarray = np.array([5, -5, 0, 13, 5, 50]),
lap_count=10):
"""
Args:
carla_settings: CarlaConfig instance
agent_settings: AgentConfig instance
npc_agent_class: an agent class
competition_mode: [Optional] True/False
start_bbox: [Optional] array of [minx, miny, minz, maxx, maxy, maxz].
[5, -5, 0, 13, 5, 50] is the bbox for easy_map.
[-815, 20, -760, -770, 120, -600] is the bbox for berkeley_minor_map
lap_count: [Optional] total lap count
"""
self.carla_settings = carla_settings
self.agent_settings = agent_settings
self.carla_bridge = CarlaBridge()
self.npc_agent_class = npc_agent_class
self.world = None
self.client = None
self.controller = None
self.display = None
self.agent = None
self.npc_agents: Dict[npc_agent_class, Any] = {}
self.agent_collision_counter = 0
self.competition_mode = competition_mode
self.start_bbox = start_bbox
self.lap_count = lap_count
self.completed_lap_count = 0
self.sensor_data = SensorsData()
self.vehicle_state = Vehicle()
self.start_simulation_time: Optional[float] = None
self.start_vehicle_position: Optional[np.array] = None
self.end_simulation_time: Optional[float] = None
self.end_vehicle_position: Optional[np.array] = None
self.logger = logging.getLogger(__name__)
self.timestep_counter = 0
self.position = np.empty((0, 3))
def set_carla_world(self) -> Vehicle:
"""
Initiating the vehicle with loading messages
Returns:
Vehicle Information
"""
try:
pygame.init()
pygame.font.init()
self.logger.debug(f"Connecting to {self.carla_settings.host}: "
f"{self.carla_settings.port}")
self.client = carla.Client(self.carla_settings.host,
self.carla_settings.port)
if not self.check_version(client=self.client):
self.logger.error(f"Version Mismatch: Client = {self.client.get_client_version()}, "
f"Server = {self.client.get_server_version()}. \n"
f"HINT: Please change carla_version to either 0.9.9 or 0.9.10 "
f"in ROAR_Sim.configurations.carla_version.txt")
exit(1)
if self.carla_settings.should_visualize_with_pygame is True:
self.display = pygame.display.set_mode(
(self.carla_settings.width, self.carla_settings.height),
pygame.HWSURFACE | pygame.DOUBLEBUF)
self.logger.debug(f"Setting HUD")
hud = HUD(self.carla_settings.width, self.carla_settings.height)
self.logger.debug("Setting up world")
self.world = World(carla_world=self.client.get_world(), hud=hud,
carla_settings=self.carla_settings,
agent_settings=self.agent_settings)
if self.carla_settings.should_spawn_npcs:
self.spawn_npcs()
self.logger.debug(f"Connecting to Keyboard controls")
self.controller = KeyboardControl(world=self.world,
carla_setting=self.carla_settings
)
self.logger.debug("All settings done")
return self.carla_bridge. \
convert_vehicle_from_source_to_agent(self.world.player)
except Exception as e:
self.logger.error(
f"Unable to initiate the world due to error: {e}")
raise e
def start_game_loop(self,
agent,
use_manual_control=False,
starting_lap_count=0):
"""Start running the vehicle and stop when finished running
the track"""
self.agent = agent
lap_count = starting_lap_count
has_entered_bbox = False
should_restart_lap = False
try:
self.logger.debug("Initiating game")
self.agent.start_module_threads()
clock = pygame.time.Clock()
self.start_simulation_time = time.time()
self.start_vehicle_position = self.agent.vehicle.transform.location.to_array()
while True:
# make sure the program does not run above 60 frames per second
# this allow proper synchrony between server and client
clock.tick_busy_loop(60)
should_continue, carla_control = self.controller.parse_events(client=self.client,
world=self.world,
clock=clock)
self.agent_collision_counter = self.get_num_collision()
if self.competition_mode:
is_currently_in_bbox = self.is_within_start_finish_bbox(
curr_pos=self.agent.vehicle.transform.location.to_array())
if has_entered_bbox is True and is_currently_in_bbox is False:
has_entered_bbox = False
elif has_entered_bbox is False and is_currently_in_bbox is True:
has_entered_bbox = True
lap_count += 1
if lap_count > self.lap_count:
# if i have reached target number of lap counts, break out of game loop
break
else:
self.logger.info(f"Going onto Lap {lap_count} out of {self.lap_count}")
if len(self.world.collision_sensor.history) > 0:
should_restart_lap = True
if should_restart_lap:
should_continue = False
# check for exiting condition
if should_continue is False:
break
self.world.tick(clock)
self.world.render(display=self.display)
if self.carla_settings.should_visualize_with_pygame is True:
pygame.display.flip()
self.fetch_data_async()
sensor_data, new_vehicle = self.sensor_data.copy(), self.vehicle_state.copy()
# Add for dynamics logging
pos = self.agent.vehicle.transform.location
self.position = np.append(self.position, [[pos.x, pos.y, pos.z]], axis=0)
if self.carla_settings.save_semantic_segmentation and self.world.semantic_segmentation_sensor_data:
Thread(target=lambda: self.world.semantic_segmentation_sensor_data.save_to_disk((Path(
"./data/output_oct_10") / "ss" / f"frame_{self.agent.time_counter}.png").as_posix(),
cc.CityScapesPalette),
args=()).start()
if self.carla_settings.should_spawn_npcs:
self.execute_npcs_step()
if self.agent_settings.enable_autopilot:
if self.agent is None:
raise Exception(
"In autopilot mode, but no agent is defined.")
agent_control = self.agent.run_step(vehicle=new_vehicle,
sensors_data=sensor_data)
if not use_manual_control:
carla_control = self.carla_bridge. \
convert_control_from_agent_to_source(agent_control)
self.world.player.apply_control(carla_control)
self.timestep_counter += 1
self.completed_lap_count = lap_count - 1
except Exception as e:
self.logger.error(f"Error happened, exiting safely. Error: {e}")
finally:
if self.competition_mode and should_restart_lap:
self.restart_on_lap(agent=agent,
use_manual_control=use_manual_control,
starting_lap_count=lap_count - 1)
else:
self.on_finish()
def restart_on_lap(self, agent, use_manual_control: bool, starting_lap_count: int):
self.logger.info(f"Restarting on Lap {starting_lap_count}")
self.on_finish()
self.set_carla_world()
agent.__init__(vehicle=agent.vehicle, agent_settings=agent.agent_settings)
self.start_game_loop(agent=agent, use_manual_control=use_manual_control,
starting_lap_count=starting_lap_count)
def on_finish(self):
self.logger.debug("Ending Game")
if self.agent is not None:
self.agent.shutdown_module_threads()
self.end_vehicle_position = self.agent.vehicle.transform.location.to_array()
else:
self.end_vehicle_position = self.start_vehicle_position
if self.world is not None:
self.end_simulation_time = time.time()
self.world.destroy()
self.logger.debug("All actors are destroyed")
try:
pygame.quit()
except Exception as e:
self.logger.debug(
f"Cannot quit pygame normally, force quitting. Error: {e}")
# # Save Logged Dynamics Data
t = np.linspace(0, self.end_simulation_time - self.start_simulation_time, len(self.position))
d = {'time': t, 'x': self.position[:, 0], 'y': self.position[:, 1], 'z': self.position[:, 2] }
df = | pd.DataFrame(data=d) | pandas.DataFrame |
# Copyright(c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import clr
from System import DateTime
import pandas as pd
import re
from datetime import datetime
from pathlib import Path
clr.AddReference(str(Path("curves/lib/Cmdty.TimePeriodValueTypes")))
from Cmdty.TimePeriodValueTypes import QuarterHour, HalfHour, Hour, Day, Month, Quarter, TimePeriodFactory
FREQ_TO_PERIOD_TYPE = {
"15min" : QuarterHour,
"30min" : HalfHour,
"H" : Hour,
"D" : Day,
"M" : Month,
"Q" : Quarter
}
""" dict of str: .NET time period type.
Each item describes an allowable granularity of curves constructed, as specified by the
freq parameter in the curves public methods.
The keys represent the pandas Offset Alias which describe the granularity, and will generally be used
as the freq of the pandas Series objects returned by the curve construction methods.
The values are the associated .NET time period types used in behind-the-scenes calculations.
"""
def tranform_time_func(freq, py_time_func):
def wrapper_time_func(net_time_period):
pandas_period = net_time_period_to_pandas_period(net_time_period, freq)
return py_time_func(pandas_period)
return wrapper_time_func
def tranform_two_period_func(freq, py_two_period_func):
def wrapper_time_func(net_time_period1, net_time_period2):
pandas_period1 = net_time_period_to_pandas_period(net_time_period1, freq)
pandas_period2 = net_time_period_to_pandas_period(net_time_period2, freq)
return py_two_period_func(pandas_period1, pandas_period2)
return wrapper_time_func
def net_datetime_to_py_datetime(net_datetime):
return datetime(net_datetime.Year, net_datetime.Month, net_datetime.Day, net_datetime.Hour, net_datetime.Minute, net_datetime.Second, net_datetime.Millisecond * 1000)
def net_time_series_to_pandas_series(net_time_series, freq):
"""Converts an instance of class Cmdty.TimeSeries.TimeSeries to a pandas Series"""
curve_start = net_time_series.Indices[0].Start
curve_start_datetime = net_datetime_to_py_datetime(curve_start)
index = pd.period_range(start=curve_start_datetime, freq=freq, periods=net_time_series.Count)
prices = [net_time_series.Data[idx] for idx in range(0, net_time_series.Count)]
return pd.Series(prices, index)
def net_time_period_to_pandas_period(net_time_period, freq):
start_datetime = net_datetime_to_py_datetime(net_time_period.Start)
return | pd.Period(start_datetime, freq=freq) | pandas.Period |
# StaticCell.py
# Author: <NAME>
# Version: 1.0.0
# This code is designed to be imported and run inside a Jupyter notebook using an iPython kernel.
import numpy as np
import pandas as pd
import ALS
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from IPython.display import display, clear_output
df_photolysis_params = {}
df_photolysis_params['C3H3Br'] = {'xsn':1.0e-19, 'product1':'C3H3', 'product2':'Br'}
class StaticCell(ALS.KineticModel):
def __init__(self, user_model):
super().__init__(user_model, err_weight=False, fit_pre_photo=False, apply_IRF=False, apply_PG=False, t_PG=1.0)
def plot_model(self, t_start, t_end, tbin, df_model_params, initial_concentrations, df_photolysis_params, fluence, photolysis_cycles=1, delta_xtick=20.0, save_fn=None):
df_ALS_params = {'t0': 0}
#print(initial_concentrations)
# = df_model_params.to_dict('index').copy()
#print(type(dict_mod_params))
c_0 = initial_concentrations.copy()
t_full = np.array([])
for cycle in range(0, photolysis_cycles):
c_after_photo = c_0.copy()
for species in df_photolysis_params.itertuples():
dC = species.xsn * fluence * c_0[species.Index]
c_after_photo[species.Index] = c_0[species.Index] - dC
for product, qyield in zip(species.products, species.qyields):
c_after_photo[product] = c_0[product] + dC * qyield
#print(c_after_photo)
#for key in c_after_photo:
# dict_mod_params['c_' + key + '_0'] = {'val':c_after_photo[key], 'err':0, 'fit':False}
c_params = {}
for key in c_after_photo:
c_params['c_' + key + '_0'] = {'val':c_after_photo[key], 'err':0, 'fit':False}
df_mod_params = df_model_params.append(pd.DataFrame.from_dict(c_params, orient='index'))
if cycle == 0:
t_model, c_model = self._model(t_start, t_end, tbin, df_mod_params['val'].to_dict(), df_ALS_params)
endtime = 0
else:
t_model, c_model = self._model(0, t_end, tbin, df_mod_params['val'].to_dict(), df_ALS_params)
t_model = t_model + endtime
endtime = t_model[-1]
#print("New endtime", endtime)
t_full = np.concatenate((t_full,t_model[:-1]))
#print("Tfull", t_full)
if cycle == 0:
#c_full = c_model.to_dict('list').copy()
c_full = c_model.copy().iloc[:-1]
else:
c_full = c_full.append(c_model.iloc[:-1], ignore_index=True)
#for key in c_model:
# c_full[key] = np.concatenate((c_full[key], c_model[key]))
#for key in c_model:
# #print(key)
# c_0[key] = list(c_model[key])[-1]
for (species, concentrations) in c_model.iteritems():
#print(species)
#print(type(concentrations))
c_0[species] = concentrations.iloc[-1]
#print(c_0['HO2'])
species_names = list(c_model.columns)
nSpecies = len(species_names)
# Set up the grid of subplots
ncols = 3
nrows = (nSpecies//ncols) if (nSpecies%ncols) == 0 else (nSpecies//ncols)+1
dpi = 120
plt.rc('font', size=9)
plt.rc('axes.formatter', useoffset=False)
f = plt.figure(figsize=(1000/dpi,325*nrows/dpi), dpi=dpi)
gs = gridspec.GridSpec(nrows, ncols, figure=f, hspace=0.45, wspace=0.3, top=0.9, bottom=0.2)
# Determine x-axis ticks
tick_low = (t_start//delta_xtick)*delta_xtick
tick_high = endtime if endtime % delta_xtick == 0. else ((endtime//delta_xtick)+1)*delta_xtick
ticks = np.linspace(tick_low, tick_high, num=round(((tick_high-tick_low)/delta_xtick)+1), endpoint=True)
# Make the subplots
s_model = []
for i, species in enumerate(species_names):
mod = c_full[species]
s_model.append(mod)
j = i // 3 # Row index
k = i % 3 # Col index
ax = plt.subplot(gs[j,k])
ax.ticklabel_format(axis='y', style='sci', scilimits=(0,0))
ax.plot(t_full, mod, linewidth=2)
# Manually set x-axis ticks
ax.set_xticks(ticks)
# Labels
ax.set_title(species, fontweight='bold') # Make the title the species name
ax.set_xlabel('Time (ms)') # Set x-axis label for bottom plot
if k == 0: # Set y-axis labels if plot is in first column
ax.set_ylabel('Concentration ($\mathregular{molc/cm^{3}})$')
plt.show()
# Save the model traces
if save_fn:
df = | pd.DataFrame(s_model) | pandas.DataFrame |
import sys
import mechanize
import pandas as pd
from bs4 import BeautifulSoup
class stockCrawler(object):
"""docstring for ClassName"""
def __init__(self):
self.all_l=[]
self.year = sys.argv[1]
self.month = sys.argv[2]
def crawler(self,year,month):
br = mechanize.Browser()
url = "http://www.twse.com.tw/ch/trading/exchange/STOCK_DAY/genpage/Report"+str(year)+str(month)+"/"+str(year)+str(month)+"_F3_1_8_6214.php?STK_NO=6214&myear="+str(year)+"&mmon="+str(month)
res = br.open(url)
soup = BeautifulSoup(res)
for a in soup.find_all( attrs={"bgcolor": "#FFFFFF"} ):
data_l = []
for t in [0,1,3,4,5,6,8]:
data_l.append(a.contents[t].string)
self.all_l.append(data_l)
def storage(self):
rowname = ['Date','Trade Volume','Opening Price','Highest Price','Lowest Price','Closing Price','Transaction']
df = | pd.DataFrame(self.all_l, columns=rowname) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/dev-01-retrieval.ipynb (unless otherwise specified).
__all__ = ['query_API', 'dict_col_2_cols', 'clean_nested_dict_cols', 'set_dt_idx', 'create_df_dt_rng', 'clean_df_dts',
'retrieve_stream_df', 'check_streams', 'retrieve_streams_df', 'parse_A44_response', 'retreive_DAM_prices',
'parse_A75_response', 'retrieve_production']
# Cell
import json
import numpy as np
import pandas as pd
import os
import requests
import xmltodict
from datetime import date
from warnings import warn
from itertools import product
from dotenv import load_dotenv
from entsoe import EntsoePandasClient, EntsoeRawClient
# Cell
def query_API(start_date:str, end_date:str, stream:str, time_group='30m'):
"""
'Query API' makes the call to Electric Insights and returns the JSON response
Parameters:
start_date: Start date for data given as a string in the form '%Y-%m-%d'
end_date: End date for data given as a string in the form '%Y-%m-%d'
stream: One of 'prices_ahead', 'prices_ahead', 'prices', 'temperatures' or 'emissions'
time_group: One of '30m', '1h', '1d' or '7d'. The default is '30m'
"""
# Checking stream is an EI endpoint
possible_streams = ['prices_ahead', 'prices', 'temperatures', 'emissions', 'generation-mix']
assert stream in possible_streams, f"Stream must be one of {''.join([stream+', ' for stream in possible_streams])[:-2]}"
# Checking time_group will be accepted by API
possible_time_groups = ['30m', '1h', '1d', '7d']
assert time_group in possible_time_groups, f"Time group must be one of {''.join([time_group+', ' for time_group in possible_time_groups])[:-2]}"
# Formatting dates
format_dt = lambda dt: date.strftime(dt, '%Y-%m-%d') if isinstance(dt, date) else dt
start_date = format_dt(start_date)
end_date = format_dt(end_date)
# Running query and parsing response
response = requests.get(f'http://drax-production.herokuapp.com/api/1/{stream}?date_from={start_date}&date_to={end_date}&group_by={time_group}')
r_json = response.json()
return r_json
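# Illustrative usage sketch: one week of half-hourly day-ahead prices. The dates
# are arbitrary; the stream and time_group values are the ones documented above.
def _example_query_prices():
    return query_API('2020-01-01', '2020-01-08', stream='prices_ahead', time_group='30m')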
# Cell
def dict_col_2_cols(df:pd.DataFrame, value_col='value'):
"""Checks the `value_col`, if it contains dictionaries these are transformed into new columns which then replace it"""
## Checks the value col is found in the dataframe
if value_col not in df.columns:
return df
if isinstance(df.loc[0, value_col], dict):
df_values = pd.DataFrame(df[value_col].to_dict()).T
df[df_values.columns] = df_values
df = df.drop(columns=[value_col])
return df
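# Illustrative sketch with synthetic data: a 'value' column holding dicts is
# expanded into one column per key (here 'gas' and 'wind').
def _example_dict_col_2_cols():
    df = pd.DataFrame({'value': [{'gas': 10.2, 'wind': 3.4}, {'gas': 9.8, 'wind': 4.1}]})
    return dict_col_2_cols(df)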
# Cell
def clean_nested_dict_cols(df):
"""Unpacks columns contining nested dictionaries"""
# Calculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
while len(cols_with_dicts) > 0:
for col_with_dicts in cols_with_dicts:
# Extracting dataframes from dictionary columns
df = dict_col_2_cols(df, col_with_dicts)
# Recalculating columns that are still dictionaries
s_types = df.iloc[0].apply(lambda val: type(val))
cols_with_dicts = s_types[s_types == dict].index
return df
# Cell
def set_dt_idx(df:pd.DataFrame, idx_name='local_datetime'):
"""
Converts the start datetime to UK local time, then sets it as the index and removes the original datetime columns
"""
idx_dt = pd.DatetimeIndex(pd.to_datetime(df['start'], utc=True)).tz_convert('Europe/London')
idx_dt.name = idx_name
df.index = idx_dt
df = df.drop(columns=['start', 'end'])
return df
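# Illustrative sketch with synthetic rows: the raw API response carries UTC
# 'start'/'end' stamps; after set_dt_idx the frame is indexed by UK local time.
def _example_set_dt_idx():
    raw = pd.DataFrame({'start': ['2020-06-01T00:00:00Z', '2020-06-01T00:30:00Z'],
                        'end': ['2020-06-01T00:30:00Z', '2020-06-01T01:00:00Z'],
                        'demand': [25.1, 24.8]})
    return set_dt_idx(raw)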
def create_df_dt_rng(start_date, end_date, freq='30T', tz='Europe/London', dt_str_template='%Y-%m-%d'):
"""
Creates a dataframe mapping between local datetimes and electricity market dates/settlement periods
"""
# Creating localised datetime index
s_dt_rng = pd.date_range(start_date, end_date, freq=freq, tz=tz)
s_dt_SP_count = pd.Series(0, index=s_dt_rng).resample('D').count()
# Creating SP column
SPs = []
for num_SPs in list(s_dt_SP_count):
SPs += list(range(1, num_SPs+1))
# Creating datetime dataframe
df_dt_rng = | pd.DataFrame(index=s_dt_rng) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from grimoire.ConfigurationEnginnering import ConfigurationEnginnering
from pandas import DataFrame, Series
class BaseEnginnering(ConfigurationEnginnering):
__slots__ = ('train_X', 'train_y', 'predict_X', 'classes_')
def __init__(self):
super().__init__()
self.train_X = []
self.train_y = []
self.predict_X = []
self.classes_ = {}
def __del__(self):
del self.train_X
del self.train_y
del self.predict_X
del self.classes_
def get_subset(self, columns) -> tuple:
df_subset_x = self.train_X.loc[:, columns]
df_subset_y = self.train_y.loc[df_subset_x.index]
return (df_subset_x, df_subset_y)
def get_param_value(self, param_name: str):
if param_name in self.__slots__:
return self.__slots__[param_name]
elif param_name in self.__dict__:
return self.__dict__[param_name]
else:
raise TypeError('Access property is invalid')
def get_arrangement_features(self, features, n_selected):
from itertools import combinations
if type(n_selected) != int:
raise TypeError('Expected value integer in n_selected')
permsList = list(combinations(features, r=n_selected))
return permsList
def get_pack_nparray(self, elements: list):
import numpy as np
return np.array(elements, np.object)
def get_size_estimator(self, estimator):
from sys import getsizeof
return (getsizeof(estimator) / 1024)
def get_block_fit(self):
from sys import getsizeof
from math import ceil
# in bytes
df_sizeof = getsizeof(self.predict_X)
# number of instances
n_instances = self.predict_X.shape[0]
# size in bytes of instances
instance_sizeof = df_sizeof / n_instances
# number of instance per block
n_per_block = ceil((1024 * 4 * self.chunck) / instance_sizeof)
if n_per_block >= n_instances:
(yield (0, n_instances))
else:
pair_blocks = [((y - n_per_block), (y-1))
for y in range(n_per_block,
n_instances, n_per_block)]
if (pair_blocks[-1][1] < n_instances):
e = ((pair_blocks[-1][1] + 1), (n_instances-1))
pair_blocks.append(e)
for item in pair_blocks:
(yield (item))
def get_transform(self, data_encoder_type=1, target_encoder_type=0):
condition = [self.encoder_enable,
self.encoder_data,
(self.encoder_flag[0] == 0)]
if all(condition):
self.run_encoder_data(data_encoder_type)
encoder_df = DataFrame(index=self.train_X.index)
for col in self.train_X.columns:
index_first_element = self.train_X.index[0]
first_element = self.train_X[col][index_first_element]
if type(first_element) in self.encoder_not_type:
encoder_df.insert(loc=encoder_df.shape[1],
column=col,
value=self.train_X[col])
self.encoder_feature[col] = type(first_element)
else:
df_col = self.train_X.loc[:, [col]]
# reverse list of unique values
                    # converting to a list to make the subsequent steps easier
unique_categories = list(df_col[col].unique()[::-1])
self.encoder_feature[col] = unique_categories
df_tmp = self.encoder_X.fit_transform(df_col)
if (len(df_tmp.shape) == 1):
col_name = '{0}_all'.format(col)
encoder_df.insert(loc=encoder_df.shape[1],
column=col_name,
value=df_tmp)
self.encoder_categorical_columns.append(col_name)
else:
index_shape = range(df_tmp.shape[1])
for i, c in zip(index_shape, unique_categories):
col_name = '{0}_{1}'.format(col, c)
encoder_df.insert(loc=encoder_df.shape[1],
column=col_name,
value=df_tmp[:, i])
self.encoder_categorical_columns.append(col_name)
del self.train_X
self.train_X = encoder_df.copy()
del encoder_df
self.encoder_flag[0] = 1
condition = [self.encoder_enable,
self.encoder_target,
(self.encoder_flag[1] == 0)]
if all(condition):
self.run_encoder_target(target_encoder_type)
encoder_index = self.train_y.index
encoder_values = self.encoder_y.fit_transform(self.train_y)
self.train_y = | Series(data=encoder_values, index=encoder_index) | pandas.Series |
# -*- coding: utf-8 -*-
import os
import numpy as np
import pandas as pd
import pymysql
import datetime
import glob
'''
Program description:
1. Read AIS data from the MySQL database into a local pandas DataFrame;
2. Read local CSV files for further data processing;
3. Use datetime functions to convert the timestamps stored in the database into readable datetime values;
4. Clean and reconstruct the trajectory data, and output trajectory data that meets the requirements;
5. Produce the final per-MMSI trajectory motion-characteristics dataset for later use in deep learning.
'''
# Database connection statement
# dbconn = pymysql.connect(host = '127.0.0.1',user = 'root', passwd='<PASSWORD>',db= 'ais_dynamic',charset = 'utf8')
# SQL query statement
# sqlcmd = "select * from ais_dynamic.ais_dynamic limit 100"
# Read data from a CSV file for processing
# ais_file= pd.read_csv(r'C:\Users\cege-user\Desktop\dataset-ais\1-1000000-ais.csv',header = 0,sep = ' ',names = list('Record_Datetime','MMSI','Longitude','Latitude','Direction',
# 'Heading','Speed','Status','ROT','Position_Accuracy','UTC_Hour',
# 'UTC_Minute','UTC_Second','Message_ID','Rec_Datetime','Source_ID'))
ais_file1 = pd.read_csv(r'D:\Data store file\dataset-ais\1-1000000-ais.csv')
ais_file2 = | pd.read_csv(r'D:\Data store file\dataset-ais\1000001-2000000-ais.csv') | pandas.read_csv |
import os
from multiprocessing import Pool
import pandas as pd
# import rioxarray as rxr
import geopandas as gpd
import fiona
from shapely.geometry import Polygon
from shapely.ops import linemerge
import zipfile
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
processed_data_dir = os.path.join(BASE_DIR, 'processed_data')
vector_save_path = os.path.join(processed_data_dir, 'grouped_hydrographic_features')
if not os.path.exists(vector_save_path):
os.mkdir(vector_save_path)
#
# Using the regrouped hydrologic regions, (process_hydrologic_regions.py),
# group the stream vectors for dem processing
#
def fill_holes(data):
interior_gaps = data.interiors.values.tolist()[0]
group_name = data.index.values[0]
gap_list = []
if interior_gaps is not None:
print(f' ...{len(interior_gaps)} gaps found in {group_name} groupings.')
for i in interior_gaps:
gap_list.append(Polygon(i))
data_gaps = gpd.GeoDataFrame(geometry=gap_list, crs=data.crs)
appended_set = data.append(data_gaps)
appended_set['group'] = 0
merged_polygon = appended_set.dissolve(by='group')
return merged_polygon.geometry.values[0]
else:
print(f' ...no gaps found in {group_name}')
return data.geometry.values[0]
# nhn_path = '/media/danbot/Samsung_T5/geospatial_data/WSC_data/NHN_feature_data/'
nhn_path = '/home/danbot/Documents/code/hysets_validation/source_data/NHN_feature_data/'
nhn_feature_path = os.path.join(nhn_path, 'BC_NHN_features/')
seak_path = os.path.join(nhn_path, 'SEAK_features')
bc_groups_path = os.path.join(processed_data_dir, 'merged_basin_groups/')
bc_groups = gpd.read_file(bc_groups_path + 'BC_transborder_final_regions_4326.geojson')
bc_groups = bc_groups.to_crs(3005)
# 1. get the list of coastal + island regions
coast_groups = [
'08A', '08B', '08C', '08D',
'08E', '08F', '08G', '08M',
'09M'
]
coast_islands = ['08O', '08H']
seak_groups = ['08A', '08B', '08C', '08D']
seak_dict = {
'08A': [19010405, 19010404, 19010403, 19010406],
'08B': [19010301, 19010302, 19010303, 19010304,
19010206, 19010204, 19010212, 19010211],
'08C': [19010210, 19010208, 19010207, 19010205],
'08D': [19010103, 19010209, 19010104, 19010102],
}
# 2. retrieve the polygons associated with the 'region' boundary.
# 3. retrieve littoral / shoreline layers and merge them
# 4. split the region polygon using the line created in step 3.
# 5. discard the sea surface polygon
# 6. save new polygon and use to trim DEM in dem_basin_mapper.py
# collection of individual linestrings for splitting in a
# list and add the polygon lines to it.
# line_split_collection.append(polygon.boundary)
# merged_lines = shapely.ops.linemerge(line_split_collection)
# border_lines = shapely.ops.unary_union(merged_lines)
# decomposition = shapely.ops.polygonize(border_lines)
# load and merge the SEAK files into one gdf
seak_streams_path = os.path.join(nhn_path, 'SEAK_WBDHU8_polygons.geojson')
SEAK_polygons = gpd.read_file(seak_streams_path)
SEAK_polygons = SEAK_polygons.to_crs(3005)
SEAK_files = os.listdir(seak_path)
def retrieve_and_group_layers(feature_path, files, target_crs, target_layer):
dfs = []
all_crs = []
print(f' ...checking features at {feature_path} for layer {target_layer}.')
for file in files:
file_layers = zipfile.ZipFile(os.path.join(feature_path, file)).namelist()
layers = [e for e in file_layers if (target_layer in e) & (e.endswith('.shp'))]
if layers:
for layer in layers:
layer_path = os.path.join(feature_path, file) + f'!{layer}'
df = gpd.read_file(layer_path)
crs = df.crs
print(f' crs={crs}')
if crs not in all_crs:
all_crs.append(crs)
print(f' new crs found: {crs}')
df = df.to_crs(target_crs)
# append the dataframe to the group list
dfs.append(df)
else:
print(f'no target layers found in {file}')
return dfs
all_crs = []
# bc_groups = bc_groups[bc_groups['group_name'] == '08H'].copy()
# print(bc_groups)
target_crs = 3005
bc_groups = bc_groups.to_crs(target_crs)
bc_groups = bc_groups[bc_groups['group_name'].isin(['08B', '08C', '08D'])]
for i, row in bc_groups.iterrows():
grp_code = row['group_name']
sda_codes = row['WSCSDAs']
    if sda_codes is None:
sda_codes = [row['group_code'].lower()]
grp_code = row['group_code']
else:
sda_codes = [e.lower() for e in row['WSCSDAs'].split(',')]
print(f'Starting stream vector merge on {grp_code}: {sda_codes}')
nhn_files = [e for e in os.listdir(nhn_feature_path) if e.split('_')[2][:3] in sda_codes]
# there is one sub-sub basin region polygon that has
# a corrupt archive and needs to be filtered out
bad_zip_file_link = 'https://ftp.maps.canada.ca/pub/nrcan_rncan/vector/geobase_nhn_rhn/shp_en/08/nhn_rhn_08nec00_shp_en.zip'
bad_zip_file = bad_zip_file_link.split('/')[-1]
# skip the bad file:
nhn_files_trimmed = [f for f in nhn_files if f != bad_zip_file]
seak_included = False
for target_layer in ['WATERBODY', 'ISLAND', 'NLFLOW', 'LITTORAL',]:
df_list = []
group_stream_layers = []
print(f' Starting merge of {target_layer} features.')
output_folder = os.path.join(vector_save_path, f'{grp_code}/{target_layer}/')
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# use geojson for littoral and island (polygons)
# use .shp for stream network (NLFLOW layer)
output_filename = f'{grp_code}_{target_layer}_{target_crs}.geojson'
if target_layer in ['NLFLOW']:
output_filename = f'{grp_code}_{target_layer}_{target_crs}.shp'
output_filepath = os.path.join(output_folder, output_filename)
if not os.path.exists(output_filepath):
nhn_dfs = retrieve_and_group_layers(nhn_feature_path, nhn_files_trimmed, target_crs, target_layer)
if len(nhn_dfs) == 0:
continue
else:
nhn_gdf = gpd.GeoDataFrame(pd.concat(nhn_dfs, ignore_index=True), crs=target_crs)
print(f' {len(nhn_gdf)} NHN items found.')
# nhn_gdf['intersects_group_polygon'] = gpd.sjoin(gdf, row, how='inner', predicate='contains')
# gdf = gdf[gdf['intersects_group_polygon']].copy()
# print(nhn_gdf.head())
if nhn_gdf.empty:
continue
else:
df_list.append(nhn_gdf)
if (target_layer == 'NLFLOW') & (grp_code in seak_dict.keys()):
huc_codes = [str(e) for e in seak_dict[grp_code]]
print('')
print(f' ...searching for USGS vector info for {grp_code}.')
group_seak_files = []
for h in huc_codes:
files = [f for f in SEAK_files if h in f]
if len(files) > 0:
group_seak_files += files
# there should be as many files as there are codes,
# otherwise a file is missing.
assert len(group_seak_files) == len(seak_dict[grp_code])
# get the southeast alaska hydrographic feature files
seak_dfs = retrieve_and_group_layers(seak_path, group_seak_files, target_crs, 'NHDFlowline')
                seak_gdf = gpd.GeoDataFrame(pd.concat(seak_dfs, ignore_index=True), crs=target_crs)
# import pandas as pd
import logging
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import StandardScaler
logger = logging.getLogger(__name__)
def subtract_shuffled_null_model_from_panel(panel, shift_null_model=False):
"""Standardize a panel dataset by subtracting the expected value in a null
model.
Given a panel dataset of export values, with items as countries,
time as major_axis, and products as minor_axis, create a new panel in which
an expected value in a null model is subtracted from the data. The null
model assumes that a country's exports are allocated to different products
in the same proportions as those products' total exports are compared to
the total exports of all products.
Parameters
----------
panel : pandas Panel
A panel dataset with `items` being the names of different trajectories
(people, countries, etc.), time as `major_axis`, and features as
`minor_axis`.
shift_null_model : bool, optional, default: False
Whether to shift the null model by one time step so that data is not
normalized by data that depends on itself.
Returns
-------
panel_normalized_null_model : pandas Panel
A normalized panel in which an expected value is subtracted from each
entry in `panel`. The new normalized panel is essentially
`panel - (panel.sum(axis='minor_axis') * panel.sum('items') /
panel.sum('items').sum(axis=1)).
"""
panel_normalized_null_model = panel.copy()
sum_across_items = panel.sum('items')
sum_across_items_and_features = panel.sum('items').sum(axis=1)
share_of_each_feature = (
sum_across_items.div(
sum_across_items_and_features, axis='index')
.shift(int(shift_null_model)))
for item in panel.items:
sum_across_features = panel.loc[item].sum(axis=1)
expected = (
share_of_each_feature).mul(
sum_across_features, axis='index')
panel_normalized_null_model.loc[item] -= expected
return panel_normalized_null_model
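# A hedged sketch of the same null-model subtraction for long-format data,
# since `pandas.Panel` has been removed from recent pandas releases. Column
# names are assumptions, not part of the original API, and the
# `shift_null_model` option is omitted for brevity.
def subtract_null_model_long_format(df, item_col='item', time_col='time',
                                    feature_col='feature', value_col='value'):
    """Subtract the null-model expectation from a long-format DataFrame.

    Expects one row per (item, time, feature) combination.
    """
    total_by_time_feature = df.groupby([time_col, feature_col])[value_col].transform('sum')
    total_by_time = df.groupby(time_col)[value_col].transform('sum')
    total_by_item_time = df.groupby([item_col, time_col])[value_col].transform('sum')
    expected = total_by_item_time * total_by_time_feature / total_by_time
    return df[value_col] - expected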
class IteratedLog1p(BaseEstimator, TransformerMixin):
"""Transforms features by applying log1p a certain number of times.
Parameters
----------
n : int, default: 1
The number of times to apply numpy.log1p to the data
"""
def __init__(self, n=1, pseudolog=False):
if n < 0:
            raise ValueError('`n` must be non-negative; got {}'.format(n))
self.n = n
def _transformed_filename(self, filename):
if self.n == 1:
return 'log1p_{}'.format(filename)
else:
return 'log1p_applied_{}_times_to_{}'.format(self.n, filename)
def _transformed_name(self, name):
if self.n == 1:
return 'log(1 + {})'.format(name)
else:
return r'log1p^{' + str(self.n) + '}' + '(' + name + ')'
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""Apply `numpy.log1p` to `X` `n` times."""
result = X.copy()
for __ in range(self.n):
result = np.log1p(result)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
if hasattr(X, 'name'):
result.name = self._transformed_name(X.name)
if hasattr(X, 'filename'):
result.filename = self._transformed_filename(X.filename)
return result
def inverse_transform(self, X):
"""Apply `np.exp(X) - 1` `n` times."""
result = X.copy()
for __ in range(self.n):
            result = np.exp(result) - 1.0
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
return result
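# A hedged usage sketch for IteratedLog1p; the demo is wrapped in a helper so
# nothing runs on import, and the column name and values are illustrative.
def _iterated_log1p_demo():
    """Round-trip a tiny DataFrame through IteratedLog1p(n=2)."""
    demo = pd.DataFrame({'exports': [0.0, 9.0, 99.0]})
    transformer = IteratedLog1p(n=2)
    transformed = transformer.transform(demo)
    restored = transformer.inverse_transform(transformed)
    return transformed, restored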
class PseudoLog(BaseEstimator, TransformerMixin):
"""Transforms features by applying arcsinh(x / 2).
"""
def __init__(self):
pass
def _transformed_filename(self, filename):
return 'pseudolog_{}'.format(filename)
def _transformed_name(self, name):
return 'pseudolog(1 + {})'.format(name)
def _transformed_math_name(self, name):
return 'arcsinh({} / 2)'.format(name)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
"""Apply `arcsinh(x / 2)` to `X`."""
result = np.arcsinh(X / 2.0)
if isinstance(X, pd.DataFrame):
result = pd.DataFrame(result, index=X.index, columns=X.columns)
if hasattr(X, 'name'):
result.name = self._transformed_name(X.name)
if hasattr(X, 'filename'):
result.filename = self._transformed_filename(X.filename)
return result
def inverse_transform(self, X):
"""Apply `np.exp(X) - 1` `n` times."""
result = 2.0 * np.sinh(X)
if isinstance(X, pd.DataFrame):
            result = pd.DataFrame(result, index=X.index, columns=X.columns)
        return result
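# A hedged usage sketch for PseudoLog, wrapped in a helper so nothing runs on
# import. arcsinh(x / 2) is defined at zero and for negative values, and
# 2 * sinh(y) inverts it; the column name and values below are illustrative.
def _pseudolog_demo():
    """Round-trip a tiny DataFrame through PseudoLog."""
    demo = pd.DataFrame({'net_exports': [-100.0, 0.0, 100.0]})
    transformer = PseudoLog()
    transformed = transformer.transform(demo)
    restored = transformer.inverse_transform(transformed)
    return transformed, restored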
import tkinter
import pandas
import pandas.errors
import random
import tkinter.messagebox
SAVE_FILE = "data/save.csv"
try:
vocabulary_data = pandas.read_csv(SAVE_FILE)
except (FileNotFoundError, pandas.errors.EmptyDataError):
vocabulary_data = pandas.read_csv("data/sunda-english.csv")
LANG_FRONT = vocabulary_data.columns[0]
LANG_BACK = vocabulary_data.columns[1]
cards = vocabulary_data.to_dict('records')
current_card = None
timer = None
def count_down():
global timer
timer = window.after(3000, flip_card, current_card)
def flip_card(card):
canvas_card.itemconfigure(front_image_canvas, state="hidden")
canvas_card.itemconfigure(back_image_canvas, state="normal")
canvas_card.itemconfigure(lang_text, text=LANG_BACK)
canvas_card.itemconfigure(vocab_text, text=card[LANG_BACK])
def get_new_card():
global current_card
current_card = None
try:
card = random.choice(cards)
except IndexError:
tkinter.messagebox.showinfo("Finish", "All cards have been answered correctly!")
return
else:
current_card = card
canvas_card.itemconfigure(front_image_canvas, state="normal")
canvas_card.itemconfigure(back_image_canvas, state="hidden")
canvas_card.itemconfigure(lang_text, text=LANG_FRONT)
canvas_card.itemconfigure(vocab_text, text=current_card[LANG_FRONT])
count_down()
def right_command():
window.after_cancel(timer)
if current_card is not None:
cards.remove(current_card)
get_new_card()
def wrong_command():
window.after_cancel(timer)
if current_card is not None:
cards.remove(current_card)
cards.append(current_card)
get_new_card()
def on_closing():
    save_data = pandas.DataFrame(cards)
    # Persist the remaining cards so progress is kept between sessions
    save_data.to_csv(SAVE_FILE, index=False)
#-- -- -- -- Introduction to Importing Data in Python
# Used for Data Scientist Training Path
#FYI it's a compilation of how to work
#with different commands.
### --------------------------------------------------------
# # ------>>>>> Exploring your working directory
# In order to import data into Python, you should first
# have an idea of what files are in your working directory.
# IPython, which is running on DataCamp's servers,
# has a bunch of cool commands, including its magic
# commands. For example, starting a line with ! gives
# you complete system shell access. This means that the
# IPython magic command ! ls will display the contents of
# your current directory. Your task is to use the IPython
# magic command ! ls to check out the contents of your
# current directory and answer the following question:
# which of the following files is in your working directory?
# R/ moby_dick.txt
### --------------------------------------------------------
# # ------>>>>> Importing entire text files
# Open a file: file
file = open('moby_dick.txt', mode='r')
# Print it
print(file.read())
# Check whether file is closed
print(file.closed)
# Close file
file.close()
# Check whether file is closed
print(file.closed)
### --------------------------------------------------------
# # ------>>>>> Importing text files line by line
# Read & print the first 3 lines
with open('moby_dick.txt') as file:
print(file.readline())
print(file.readline())
print(file.readline())
### --------------------------------------------------------
# # ------>>>>> Pop quiz: examples of flat files
# You're now well-versed in importing text files and
# you're about to become a wiz at importing flat files.
# But can you remember exactly what a flat file is? Test
# your knowledge by answering the following question:
# which of these file types below is NOT an example of a flat file?
# R/ A relational database (e.g. PostgreSQL).
### --------------------------------------------------------
# # ------>>>>>Pop quiz: what exactly are flat files?
# Which of the following statements about flat files is incorrect?
# Flat files consist of rows and each row is called a record.
# Flat files consist of multiple tables with structured
# relationships between the tables.
# A record in a flat file is composed of fields or
# attributes, each of which contains at most one item of information.
# Flat files are pervasive in data science.
# R/ Flat files consist of multiple tables with structured relationships between the tables.
### --------------------------------------------------------
# # ------>>>>>Why we like flat files and the Zen of Python
# In PythonLand, there are currently hundreds of Python
# Enhancement Proposals, commonly referred to as PEPs. PEP8, for example,
# is a standard style guide for Python, written by our sensei Guido van
# Rossum himself. It is the basis for how we here at DataCamp ask our
# instructors to style their code. Another one of my favorites is PEP20,
# commonly called the Zen of Python. Its abstract is as follows:
# Long time Pythoneer Tim Peters succinctly channels the BDFL's guiding
# principles for Python's design into 20 aphorisms, only 19 of which have
# been written down.
# If you don't know what the acronym BDFL stands for, I suggest that you
# look here. You can print the Zen of Python in your shell by typing import
# this into it! You're going to do this now and the 5th aphorism (line)
# will say something of particular interest.
# The question you need to answer is: what is the 5th aphorism of the Zen of Python?
# R/ -- > command: import this
# Flat is better than nested.
### --------------------------------------------------------
# # ------>>>>> Using NumPy to import flat files
# Import package
import numpy as np
import matplotlib.pyplot as plt
# Assign filename to variable: file
file = 'digits.csv'
# Load file as array: digits
digits = np.loadtxt(file, delimiter=',')
# Print datatype of digits
print(type(digits))
# Select and reshape a row
im = digits[21, 1:]
im_sq = np.reshape(im, (28, 28))
# Plot reshaped data (matplotlib.pyplot already loaded as plt)
plt.imshow(im_sq, cmap='Greys', interpolation='nearest')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Customizing your NumPy import
# Import numpy
import numpy as np
# Assign the filename: file
file = 'digits_header.txt'
# Load the data: data
data = np.loadtxt(file, delimiter='\t', skiprows=1, usecols=[0, 2])
# Print data
print(data)
### --------------------------------------------------------
# # ------>>>>> Importing different datatypes
import numpy as np
import matplotlib.pyplot as plt
# Assign filename: file
file = 'seaslug.txt'
# Import file: data
data = np.loadtxt(file, delimiter='\t', dtype=str)
# Print the first element of data
print(data[0])
# Import data as floats and skip the first row: data_float
data_float = np.loadtxt(file, delimiter='\t', dtype=float, skiprows=1)
# Print the 10th element of data_float
print(data_float[9])
# Plot a scatterplot of the data
plt.scatter(data_float[:, 0], data_float[:, 1])
plt.xlabel('time (min.)')
plt.ylabel('percentage of larvae')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Working with mixed datatypes (1)
# Much of the time you will need to import datasets which have
# different datatypes in different columns; one column may contain
# strings and another floats, for example. The function np.loadtxt()
# will freak at this. There is another function, np.genfromtxt(),
# which can handle such structures. If we pass dtype=None to it, it
# will figure out what types each column should be.
# Import 'titanic.csv' using the function np.genfromtxt() as follows:
# data = np.genfromtxt('titanic.csv', delimiter=',', names=True, dtype=None)
# Here, the first argument is the filename, the second specifies the delimiter,
# and the third argument names tells us there is a header. Because the data are
# of different types, data is an object called a structured array. Because numpy
# arrays have to contain elements that are all the same type, the structured array
# solves this by being a 1D array, where each element of the array is a row of the
# flat file imported. You can test this by checking out the array's shape in the
# shell by executing np.shape(data).
# Accessing rows and columns of structured arrays is super-intuitive: to get the
# ith row, merely execute data[i] and to get the column with name 'Fare', execute data['Fare'].
# After importing the Titanic data as a structured array (as per the instructions above),
# print the entire column with the name Survived to the shell. What are the last
# 4 values of this column?
# R/ 1,0,1,0
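# A minimal, hedged sketch of the structured-array access described above,
# assuming 'titanic.csv' is present in the working directory.
import numpy as np
data = np.genfromtxt('titanic.csv', delimiter=',', names=True, dtype=None)
# data is a 1D structured array: one element per row of the flat file
print(np.shape(data))
# Named-column access; the last 4 values answer the question above
print(data['Survived'][-4:])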
### --------------------------------------------------------
# # ------>>>>> Working with mixed datatypes (2)
# Assign the filename: file
file = 'titanic.csv'
# Import file using np.recfromcsv: d
d = np.recfromcsv(file)
# Print out first three entries of d
print(d[:3])
### --------------------------------------------------------
# # ------>>>>> Using pandas to import flat files as DataFrames (1)
# Import pandas as pd
import pandas as pd
# Assign the filename: file
file = 'titanic.csv'
# Read the file into a DataFrame: df
df = pd.read_csv(file)
# View the head of the DataFrame
print(df.head())
### --------------------------------------------------------
# # ------>>>>> Using pandas to import flat files as DataFrames (2)
# Assign the filename: file
file = 'digits.csv'
# Read the first 5 rows of the file into a DataFrame: data
data = pd.read_csv(file, nrows=5, header=None)
# Build a numpy array from the DataFrame: data_array
data_array = data.values
# Print the datatype of data_array to the shell
print(type(data_array))
### --------------------------------------------------------
# # ------>>>>> Customizing your pandas import
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Assign filename: file
file = 'titanic_corrupt.txt'
# Import file: data
data = pd.read_csv(file, sep='\t', comment='#', na_values='Nothing')
# Print the head of the DataFrame
print(data.head())
# Plot 'Age' variable in a histogram
pd.DataFrame.hist(data[['Age']])
plt.xlabel('Age (years)')
plt.ylabel('count')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Not so flat any more
# In Chapter 1, you learned how to use the IPython magic command !
# ls to explore your current working directory. You can
# also do this natively in Python using the library os, which
# consists of miscellaneous operating system interfaces.
# The first line of the following code imports the library os,
# the second line stores the name of the current directory in a
# string called wd and the third outputs the contents of the directory in a list to the shell.
# import os
# wd = os.getcwd()
# os.listdir(wd)
# Run this code in the IPython shell and answer the
# following questions. Ignore the files that begin with .
# Check out the contents of your current directory and answer the following
# questions:
# (1) which file is in your directory and NOT an example of a flat file;
# (2) why is it not a flat file?
# R/ battledeath.xlsx is not a flat because it is a spreadsheet consisting of many sheets, not a single table.
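# A minimal, hedged sketch of the directory listing described above; the
# output depends entirely on your own working directory.
import os
wd = os.getcwd()
print(os.listdir(wd))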
### --------------------------------------------------------
# # ------>>>>> Loading a pickled file
# Import pickle package
import pickle
# Open pickle file and load data: d
with open('data.pkl', 'rb') as file:
d = pickle.load(file)
# Print d
print(d)
# Print datatype of d
print(type(d))
### --------------------------------------------------------
# # ------>>>>> Listing sheets in Excel files
# Import pandas
import pandas as pd
# Assign spreadsheet filename: file
file = 'battledeath.xlsx'
# Load spreadsheet: xls
xls = pd.ExcelFile(file)
# Print sheet names
print(xls.sheet_names)
### --------------------------------------------------------
# # ------>>>>> Importing sheets from Excel files
# Load a sheet into a DataFrame by name: df1
df1 = xls.parse('2004')
# Print the head of the DataFrame df1
print(df1.head())
# Load a sheet into a DataFrame by index: df2
df2 = xls.parse(0)
# Print the head of the DataFrame df2
print(df2.head())
### --------------------------------------------------------
# # ------>>>>> Customizing your spreadsheet import
# Import pandas
import pandas as pd
# Assign spreadsheet filename: file
file = 'battledeath.xlsx'
# Load spreadsheet: xl
xls = pd.ExcelFile(file)
import pandas as pd
from bokeh.plotting import figure, show, output_notebook, gridplot
from bokeh.palettes import brewer
from collections import OrderedDict
from bokeh.models import HoverTool
import numpy as np
from bokeh.models import ColumnDataSource, Range1d, FactorRange
from datetime import datetime
# BREWER_PLOT
def brewer_plot(defata, instruments_all, instruments=None):
"""
This function shows two bokeh brewer plots into the ipython notebook using
the data given as a parameter. In the second one only the instruments
given in the third parameter are plotted. In the first one all of them
are plotted.
:param data: power_avg table
:type data: pandas DataFrame
:param instruments_all: All the instruments in the power_avg file
:type instruments_all: List of strings
:param instruments: Instruments to be plotted in the second plot
:type instruments: List of strings
:returns: Nothing
"""
# Hidding anoying warnings on the top of the plot
output_notebook(hide_banner=True)
# Creating both figures
big_figure = create_plot(data, instruments_all)
small_figure = create_plot(data, instruments, big_figure.x_range)
# Plotting them together
p = gridplot([[big_figure], [small_figure]])
show(p)
def create_plot(data, instruments, x_range=None):
"""
This function creates a plot given a power_avg table and the instruments
to be plotted. Optionally an x_range to be linked to another plot can be
passed as a parameter.
:param data: module_states or modes table
:type data: pandas DataFrame
:param instruments: Instruments to be plotted
:type instruments: List of strings
:param x_range: x_range to be linked with
:type x_range: figure x_range
:returns: bokeh figure
"""
# Create a set of tools to use
tools = "resize,hover,save,pan,box_zoom,wheel_zoom,reset"
# Creating the areas to be plotted
areas = stacked(data, instruments)
# Selecting the colors for the calculated areas
colors = palette(len(areas))
# Stacking the values of each instrument
x2 = np.hstack((data.index.values[::-1], data.index.values))
# Creating the figure
if x_range is None:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", tools=tools, logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)))
else:
f = figure(x_axis_label=data.index.name, y_axis_label='Watts',
x_axis_type="datetime", x_range=x_range, tools=tools,
logo=None)
for pos in range(len(colors)):
f.patch(x2, list(areas.values())[pos], color=colors[pos],
legend=instruments[pos], line_color=None, alpha=0.8)
# Setting the color of the line of the background
f.grid.minor_grid_line_color = '#eeeeee'
return f
def palette(number):
"""
This function returns a palette of hex colors of size number.
:param number: Amount of different colors needed
:type number: integer
:returns: list of strings
"""
if number > 40:
print ("Ooops, too many parameters, not enough colors...")
# Selecting the colors from different bokeh palettes
palette = brewer["Spectral"][11]
palette += list(reversed(brewer["RdBu"][11]))
palette += brewer["YlGnBu"][9]
palette += list(reversed(brewer["YlGn"][9]))
palette += brewer["PiYG"][11]
return palette[:number]
def stacked(df, categories):
"""
This function stacks all the power information for each instrument.
:param df: power_avg pandas DataFrame
:type df: pandas DataFrame
:param categories: categories in which the plot is going to be divided
:type categories: list of values
:returns: pandas DataFrame
"""
areas = OrderedDict()
last = np.zeros(len(df[categories[0]]))
for cat in categories:
next = last + df[cat]
areas[cat] = np.hstack((last[::-1], next))
last = next
return areas
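# A hedged sketch of what `stacked` produces, wrapped in a helper so nothing
# runs on import; instrument names and power values are made up.
def _stacked_demo():
    """Build patch coordinates for a tiny, illustrative power table."""
    toy = pd.DataFrame({'CAM': [1.0, 2.0, 1.5], 'RADAR': [0.5, 0.5, 0.5]})
    areas = stacked(toy, ['CAM', 'RADAR'])
    # areas['CAM'] is the reversed lower boundary (zeros) followed by the
    # cumulative CAM values; areas['RADAR'] stacks on top of CAM.
    return areas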
# MODES_SCHEDULE
def modes_schedule(data):
# Hidding anoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_modes_schedule(data))
def get_modes_schedule(data, x_range=None):
"""
This function create a time line plot based on the data form modes or
module_states files.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: Nothing
"""
# Adding new column to see which instruments are changing in each entry
data = add_difference_column(data)
# Building a new table to make the data plotable by bokeh
start_end_table = build_start_end_table(data)
source = ColumnDataSource(start_end_table)
# Selecting the instruments detected in the data
instruments = [colum for colum in data if colum.upper() == colum]
instruments.sort(reverse=True)
# Creating the figure
if x_range is None:
p = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(start_end_table['Start_time']),
max(start_end_table['End_time'])),
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
p = figure(x_axis_type="datetime", logo=None,
x_range=x_range,
y_range=FactorRange(factors=instruments),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
p.quad(left='Start_time', right='End_time', top='Instrument_top',
bottom='Instrument_bottom', color='Color', source=source)
# Adding the hover tool to see info when putting the mouse over the plot
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('Mode', '@Mode'),
('Time', '@Time')
])
return p
def add_difference_column(data):
"""
This function returns the same pandas DataFrame that it receives as a
parameter but with a new column, which contains which instrument has
changed since the last recorded state in the table.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
# We create a list of lists, which will be the new column to add to to data
difference = [[]]
# We take the first row of the table to have the starting values
data_aux = data.transpose()
prev_row = data_aux[data_aux.columns.values[0]]
difference[0] = [element for element in prev_row.index
if prev_row[element] is not None]
# For each entry in the table we detect which instruments are changig
# since the previous row
pos = 0
for row in data_aux:
for element in data_aux.index:
if not prev_row[element] == data_aux[row][element]:
if not len(difference) == pos + 1:
difference.append([element])
else:
difference[pos].append(element)
if not len(difference) == pos + 1:
difference.append([])
prev_row = data_aux[row]
pos += 1
# Finally we add the calculated column
data["Change"] = difference
return data
def build_start_end_table(data):
"""
This function returns a pandas DataFrame which will be used to make a Bokeh
directly from it. This DataFrame will be created from the data received as
a parameter.
:param data: module_states or modes table
:type data: pandas DataFrame
:returns: pandas DataFrame
"""
# Creating the DataFrame manually
di = {"End_time": [], "Instrument": [],
"Mode": [], "Start_time": [], "Time": []}
# Filling the new DataFrame with the instrument, mode and start time
data_aux = data.transpose()
for row in data_aux:
row_t = data_aux[row].transpose()
for instrument in row_t["Change"]:
di["End_time"].append(None)
di["Instrument"].append(instrument)
di["Mode"].append(row_t[instrument])
di["Start_time"].append(row)
di["Time"] = [str(x) for x in di["Start_time"]]
df = pd.DataFrame(di)
    df = df.sort_values(["Start_time"], ascending=True)
instruments = [colum for colum in data if colum.upper() == colum]
# Calculating and adding the end time for each task
for ins in instruments:
shift = df.loc[df["Instrument"] == ins].shift(-1)
if len(shift) > 1:
for i in range(len(shift.index.values)):
di["End_time"][shift.index.values[i]] = \
shift["Start_time"][shift.index.values[i]]
df = pd.DataFrame(di)
# Calculating and adding the end time for tasks without unespecified end
for pos in range(len(df["End_time"])):
if not type(df["End_time"][pos]) is pd.tslib.Timestamp:
df.loc[pos, "End_time"] = df["Start_time"].max()
# Deleting OFF states, we don't want to plot it
df = df[df.Mode != "OFF"]
df[["End_time", "Start_time"]] = \
df[["End_time", "Start_time"]].astype(datetime)
# Creating new rows needed for making the bars wider in the plot
df["Instrument_bottom"] = [row + ":0.25" if " " in row else row + ":0.1"
for row in df["Instrument"].values]
df["Instrument_top"] = [row + ":0.75" if " " in row else row + ":0.9" for
row in df["Instrument"].values]
# Setting different colors for each different mode in the DataFrame
modes = df["Mode"].unique()
colors = dict(zip(modes, palette(len(modes))))
df["Color"] = [colors[row] for row in df["Mode"].values]
return df
# DATA_PLOT
def data_plot(data, instruments):
"""
This function shows a data plot in the ipython notebook using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
# Hidding anoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_data_plot(data, instruments))
def get_data_plot(data, instruments, x_range=None):
"""
This function returns a data rate plot bokeh figure using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure depending if we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
# Getting the appropiate list of colors
colors = palette(len(instruments))
i = 0
# Transforming the multiindex dataframe into a normal one to use hover tool
d = transform_multiindex_df(data, instruments)
# Inserting the lines in the plot
for ins in instruments:
r.line(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
legend=ins[0] + " - " + ins[1], line_width=3)
# I don't know why, but if this source is not rebuilt every single
# time, it doesn't plot correctly
source = ColumnDataSource(d)
# WARNING: THIS IS A HACK
# Hover tool doesn't work over lines show I have created points
r.scatter(d['index'], d[ins[0] + "_" + ins[1]], color=colors[i],
source=source, fill_color=None, size=8)
i += 1
r.title = "Data Rate"
r.grid.grid_line_alpha = 0.3
# Adding the hover tool to see info when putting the mouse over the plot
hover = r.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([("Time", "@Time")] +
[(ins[0] + " - " + ins[1], "@" + ins[0] + "_"
+ ins[1]) for ins in instruments])
return r
def transform_multiindex_df(data, instruments):
"""
This function returns a pandas DataFrame without a multiindex and prepared
to be plotted and used by the hover tool when converted to the proper
format.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: pandas DataFrame
"""
d = {}
d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]
d['index'] = data.index.values
for ins in instruments:
d[ins[0] + "_" + ins[1]] = \
[x[0] for x in data[ins[0], ins[1]].values.tolist()]
df = pd.DataFrame(d)
return df
# POWER_PLOT
def power_plot(data, instruments):
"""
This function shows a power plot in the ipython notebook using the given
data for the given instruments.
:param data: power usage pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:returns: nothing
"""
# Hidding anoying warnings on the top of the plot
output_notebook(hide_banner=True)
show(get_power_plot(data, instruments))
def get_power_plot(data, instruments, x_range=None):
"""
This function returns a power plot bokeh figure using the given
data for the given instruments.
:param data: data_rate pandas DataFrame
:type data: pandas DataFrame
:param instruments: list of the instruments to plot
:type instruments: list of strings
:param x_range: x_range from another figure to link with
:type x_range: x_range bokeh format
:returns: bokeh figure
"""
# Creating the figure depending if we want to link it to another figure
if x_range is None:
r = figure(x_axis_type="datetime", logo=None,
x_range=Range1d(min(data.index.values),
max(data.index.values)),
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
else:
r = figure(x_axis_type="datetime", x_range=x_range, logo=None,
tools="resize,hover,save,pan,box_zoom,wheel_zoom,reset")
# Getting the appropiate list of colors
colors = palette(len(instruments))
i = 0
# Preparing a set of data to convert into a source for the hover tool
d = data.copy(deep=True)
    d['Time'] = [str(x) for x in pd.to_datetime(data.index.values)]