prompt | completion | api |
---|---|---|
stringlengths 19 to 1.03M | stringlengths 4 to 2.12k | stringlengths 8 to 90 |
import pandas as pd
import numpy as np
e2 = pd.read_csv('test_prs.sscore', sep='\t')
e = | pd.read_parquet('test_prs.parquet') | pandas.read_parquet |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 22 12:00:37 2020
@author: tianyu
"""
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from sklearn.preprocessing import Normalizer
import math
from torch.autograd import Variable
import torch.nn.functional as F
import torch.utils.data as Data
from sklearn.metrics.pairwise import euclidean_distances
import os
from sklearn import preprocessing
from sklearn import linear_model
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
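# Illustrative usage of encode_onehot (added sketch, not part of the original
# script): each distinct label becomes one row of an identity matrix. Because the
# classes come from a set, the column order is arbitrary between runs.
def _demo_encode_onehot():
    labels = ['beta', 'alpha', 'beta', 'gamma']
    onehot = encode_onehot(labels)
    # onehot has shape (4, 3) with exactly one 1 per row
    return onehot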
#path = '/Users/tianyu/Google Drive/fasttext/gcn/pygcn-master/data/cora/'
#dataset = 'cora'
def high_var_dfdata_gene(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar) #small --> big
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[ind_maxvar[:num]], ind_maxvar[:num]
ind_gene = data.index.values[ind_maxvar[:num]]
return data.iloc[ind_maxvar[:num]],gene.loc[ind_gene]
def high_var_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[ind_maxvar[:num]]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_var_npdata(data, num, gene = None, ind=False): #data: gene*cell
dat = np.asarray(data)
datavar = np.var(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# geneind2 = np.random.choice(ind_maxvar[num//2:], size = num//2, replace = False)
# gene_ind = np.concatenate((gene_ind, geneind2))
#np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_tfIdf_npdata(data,tfIdf, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.var(tfIdf, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
def high_expr_dfdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data.iloc[gene_ind]
if ind:
return data.iloc[gene_ind], gene_ind
return data.iloc[gene_ind],gene.iloc[gene_ind]
def high_expr_npdata(data, num, gene = None, ind=False):
dat = np.asarray(data)
datavar = np.sum(dat, axis = 1)*(-1)
ind_maxvar = np.argsort(datavar)
gene_ind = ind_maxvar[:num]
# np.random.shuffle(gene_ind)
if gene is None and ind is False:
return data[gene_ind]
if ind:
return data[gene_ind],gene_ind
return data[gene_ind],gene.iloc[gene_ind]
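# Illustrative usage of the high_* selectors (added sketch, not part of the
# original script): given a genes-by-cells matrix, keep the `num` most variable
# genes and, optionally, their row indices.
def _demo_high_var_npdata():
    data = np.random.rand(100, 20)  # 100 genes x 20 cells (toy data)
    top, idx = high_var_npdata(data, num=10, ind=True)
    # top holds the 10 highest-variance rows of `data`; idx holds their row indices
    return top, idx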
def get_rank_gene(OutputDir, dataset):
gene = pd.read_csv(OutputDir+dataset+'/rank_genes_dropouts_'+dataset+'.csv')
return gene
def rank_gene_dropouts(data, OutputDir, dataset):
# data: n_cell * n_gene
genes = np.zeros([np.shape(data)[1],1], dtype = '>U10')
train = pd.DataFrame(data)
train.columns = np.arange(len(train.columns))
# rank genes training set
dropout = (train == 0).sum(axis='rows') # n_gene * 1
dropout = (dropout / train.shape[0]) * 100
mean = train.mean(axis='rows') # n_gene * 1
notzero = np.where((np.array(mean) > 0) & (np.array(dropout) > 0))[0]
zero = np.where(~((np.array(mean) > 0) & (np.array(dropout) > 0)))[0]
train_notzero = train.iloc[:,notzero]
train_zero = train.iloc[:,zero]
zero_genes = train_zero.columns
dropout = dropout.iloc[notzero]
mean = mean.iloc[notzero]
dropout = np.log2(np.array(dropout)).reshape(-1,1)
mean = np.array(mean).reshape(-1,1)
reg = linear_model.LinearRegression()
reg.fit(mean,dropout)
residuals = dropout - reg.predict(mean)
residuals = pd.Series(np.array(residuals).ravel(),index=train_notzero.columns) # n_gene * 1
residuals = residuals.sort_values(ascending=False)
sorted_genes = residuals.index
sorted_genes = sorted_genes.append(zero_genes)
genes[:,0] = sorted_genes.values
genes = pd.DataFrame(genes)
genes.to_csv(OutputDir + dataset + "/rank_genes_dropouts_" + dataset + ".csv", index = False)
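# Illustrative usage of rank_gene_dropouts / get_rank_gene (added sketch, not part
# of the original script): genes are ranked by the residual of a linear fit of
# log2(dropout rate) on mean expression, written to
# <OutputDir><dataset>/rank_genes_dropouts_<dataset>.csv and read back later.
# The output directory below is a hypothetical example and must already exist.
def _demo_rank_genes(output_dir='results/', dataset='toy'):
    cells_by_genes = np.random.poisson(1.0, size=(50, 200))  # n_cell x n_gene toy counts
    rank_gene_dropouts(cells_by_genes, output_dir, dataset)  # writes the ranking CSV
    return get_rank_gene(output_dir, dataset)                # reads it back as a DataFrame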
def data_noise(data): # data is samples*genes
for i in range(data.shape[0]):
#drop_index = np.random.choice(train_data.shape[1], 500, replace=False)
#train_data[i, drop_index] = 0
target_dims = data.shape[1]
noise = np.random.rand(target_dims)/10.0
data[i] = data[i] + noise
return data
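# Illustrative usage of data_noise (added sketch, not part of the original
# script): it adds small uniform noise in [0, 0.1) to every row in place, so pass
# a copy if the original matrix must stay unchanged.
def _demo_data_noise():
    samples = np.random.rand(5, 8)      # samples x genes (toy data)
    noisy = data_noise(samples.copy())  # copy() keeps `samples` intact
    return noisy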
def norm_max(data):
data = np.asarray(data)
max_data = np.max([np.absolute(np.min(data)), np.max(data)])
data = data/max_data
return data
def findDuplicated(df):
df = df.T
idx = df.index.str.upper()
filter1 = idx.duplicated(keep = 'first')
print('duplicated rows:',np.where(filter1 == True)[0])
indd = np.where(filter1 == False)[0]
df = df.iloc[indd]
return df.T
# In[]:
def load_labels(path, dataset):
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return labels
def load_usoskin(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='usoskin', net='String'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'data_13776.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(13776)+'.npz')
print(adj.shape)
labels = pd.read_csv(path +'/' +dataset +'/data_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_kolod(path = '/Users/tianyu/google drive/fasttext/imputation/', dataset='kolod', net='pcc'):
# path = os.path.join('/Users',user,'google drive/fasttext/imputation')
data = pd.read_csv(os.path.join(path, dataset, 'kolod.csv'), index_col = 0)
# adj = sp.load_npz(os.path.join(path, dataset, 'adj13776.npz'))
print(data.shape)
adj = np.corrcoef(np.asarray(data))
#adj[np.where(adj < 0.3)] = 0
labels = pd.read_csv(path +'/' +dataset +'/kolod_labels.csv',index_col = 0)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
return adj, np.asarray(data), labels
def load_largesc(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Intra-dataset/', dataset='Zhengsorted',net='String'):
if dataset == 'Zhengsorted':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_DownSampled_SortedPBMC_data.csv',index_col = 0, header = 0)
elif dataset == 'TM':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_TM_data.csv',index_col = 0, header = 0)
elif dataset == 'Xin':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Xin_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'BaronHuman':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'BaronMouse':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_MousePancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'Muraro':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Muraro_HumanPancreas_data_renameCols.csv',index_col = 0, header = 0)
elif dataset == 'Segerstolpe':
#path = os.path.join(path, 'Pancreatic_data/')
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_Segerstolpe_HumanPancreas_data.csv',index_col = 0, header = 0)
elif dataset == 'AMB':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_mouse_allen_brain_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
shuffle_index = np.loadtxt(os.path.join(path + dataset) +'/shuffle_index_'+dataset+'.txt')
labels = pd.read_csv(os.path.join(path + dataset) +'/Labels.csv',index_col = None)
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['Class']))}
labels['Class'] = labels['Class'].map(class_mapping)
del class_mapping
labels = np.asarray(labels.iloc[:,0]).reshape(-1)
return adj, np.asarray(features.T), labels,shuffle_index
elif dataset == 'Zheng68K':
features = pd.read_csv(os.path.join(path + dataset) +'/Filtered_68K_PBMC_data.csv',index_col = 0, header = 0)
elif dataset == '10x_5cl':
path = os.path.join(path, 'CellBench/')
features = pd.read_csv(os.path.join(path + dataset) +'/10x_5cl_data.csv',index_col = 0, header = 0)
elif dataset == 'CelSeq2_5cl':
path = os.path.join(path, 'CellBench/')
features = pd.read_csv(os.path.join(path + dataset) +'/CelSeq2_5cl_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
labels = load_labels(path, dataset)
shuffle_index = np.loadtxt(os.path.join(path + dataset) +'/shuffle_index_'+dataset+'.txt')
return adj, np.asarray(features.T), labels,shuffle_index
# In[]:
def load_inter(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Inter-dataset/', dataset='CellBench',net='String'):
if dataset == 'CellBench':
features = pd.read_csv(os.path.join(path + dataset) +'/Combined_10x_CelSeq2_5cl_data.csv',index_col = 0, header = 0)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + dataset) + '/adj'+ net + dataset + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
labels = load_labels(path, dataset)
return adj, np.asarray(features.T), labels, None
# In[]:
def load_pancreas(path = '/Users/tianyu/Desktop/scRNAseq_Benchmark_datasets/Intra-dataset/', dataset='',net='String'):
##############
xin = pd.read_csv(os.path.join(path + 'Xin') +'/Filtered_Xin_HumanPancreas_data.csv',index_col = 0, header = 0)
bh = pd.read_csv(os.path.join(path + 'BaronHuman') +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0)
mu = pd.read_csv(os.path.join(path + 'Muraro') +'/Filtered_Muraro_HumanPancreas_data_renameCols.csv',index_col = 0, header = 0)
se = pd.read_csv(os.path.join(path + 'Segerstolpe') +'/Filtered_Segerstolpe_HumanPancreas_data.csv',index_col = 0, header = 0)
gene_set = list(set(xin.columns)&set(bh.columns)&set(mu.columns)&set(se.columns))
gene_set.sort()
gene_index_bh = [i for i, e in enumerate(bh.columns) if e in gene_set]
xin = xin[gene_set]
bh = bh[gene_set]
mu = mu[gene_set]
se = se[gene_set]
mu = np.log1p(mu)
se = np.log1p(se)
bh = np.log1p(bh)
xin = np.log1p(xin)
# indexXin = xin.index.to_list()
# indexMu = mu.index.to_list()
# indexSe = se.index.to_list()
# indexBh = bh.index.to_list()
min_max_scaler = preprocessing.MinMaxScaler()
temp = min_max_scaler.fit_transform(np.asarray(mu))
mu = pd.DataFrame(temp, index = mu.index, columns = mu.columns)
temp = min_max_scaler.fit_transform(np.asarray(se))
se = pd.DataFrame(temp, index = se.index, columns = se.columns)
temp = min_max_scaler.fit_transform(np.asarray(bh))
bh = pd.DataFrame(temp, index = bh.index, columns = bh.columns)
temp = min_max_scaler.fit_transform(np.asarray(xin))
xin = pd.DataFrame(temp, index = xin.index, columns = xin.columns)
del temp
#mu = preprocessing.normalize(np.asarray(mu), axis = 1, norm='l1')
###############
features = pd.read_csv(os.path.join(path + 'BaronHuman') +'/Filtered_Baron_HumanPancreas_data.csv',index_col = 0, header = 0, nrows=2)
features = findDuplicated(features)
print(features.shape)
adj = sp.load_npz(os.path.join(path + 'BaronHuman') + '/adj'+ net + 'BaronHuman' + '_'+str(features.T.shape[0])+'.npz')
print(adj.shape)
adj = adj[gene_index_bh, :][:, gene_index_bh]
###############
datasets = ['Xin','BaronHuman','Muraro','Segerstolpe', 'BaronMouse']
l_xin = pd.read_csv(os.path.join(path + datasets[0]) +'/Labels.csv',index_col = None)
l_bh = pd.read_csv(os.path.join(path + datasets[1]) +'/Labels.csv',index_col = None)
l_mu = pd.read_csv(os.path.join(path + datasets[2]) +'/Labels.csv',index_col = None)
l_mu = l_mu.replace('duct','ductal')
l_mu = l_mu.replace('pp','gamma')
l_se = pd.read_csv(os.path.join(path + datasets[3]) +'/Labels.csv',index_col = None)
#labels_set = list(set(l_xin['x']) & set(l_bh['x']) & set(l_mu['x']))
if True:
labels_set = set(['alpha','beta','delta','gamma'])
index = [i for i in range(len(l_mu)) if l_mu['x'][i] in labels_set]
mu = mu.iloc[index]
l_mu = l_mu.iloc[index]
index = [i for i in range(len(l_se)) if l_se['x'][i] in labels_set]
se = se.iloc[index]
l_se = l_se.iloc[index]
index = [i for i in range(len(l_bh)) if l_bh['x'][i] in labels_set]
bh = bh.iloc[index]
l_bh = l_bh.iloc[index]
index = [i for i in range(len(l_xin)) if l_xin['x'][i] in labels_set]
xin = xin.iloc[index]
l_xin = l_xin.iloc[index]
alldata = pd.concat((xin,bh,mu,se), 0)
#alldata.to_csv(path+'Data_pancreas_4.csv')
labels = pd.concat((l_xin, l_bh, l_mu, l_se), 0)
# labels.to_csv(path+'Labels_pancreas_19.csv')
labels.columns = ['V1']
class_mapping = {label: idx for idx, label in enumerate(np.unique(labels['V1']))}
labels['V1'] = labels['V1'].map(class_mapping)
del class_mapping
labels = np.asarray(labels).reshape(-1)
###############
#shuffle_index = np.asarray([1449, 8569, 2122,2133])
shuffle_index = np.asarray([1449, 5707, 1554, 1440])
return adj, np.asarray(alldata.T), labels, shuffle_index
# In[]:
def build_adj_weight(idx_features):
edges_unordered = pd.read_csv('/users/tianyu/desktop/imputation/STRING_ggi.csv', index_col = None, usecols = [1,2,16])
# edges_unordered = np.asarray(edges_unordered[['protein1','protein2','combined_score']]) # Upper case.
edges_unordered = np.asarray(edges_unordered)
idx = []
mapped_index = idx_features.index.str.upper() # if data.index is lower case. Usoskin data is upper case, do not need it.
for i in range(len(edges_unordered)):
if edges_unordered[i,0] in mapped_index and edges_unordered[i,1] in mapped_index:
idx.append(i)
edges_unordered = edges_unordered[idx]
print ('idx_num:',len(idx))
del i,idx
# build graph
idx = np.array(mapped_index)
idx_map = {j: i for i, j in enumerate(idx)} # eg: {'TSPAN12': 0, 'TSHZ1': 1}
# the key (names) in edges_unordered --> the index (which row) in matrix
edges = np.array(list(map(idx_map.get, edges_unordered[:,0:2].flatten())),
dtype=np.int32).reshape(edges_unordered[:,0:2].shape) #map:map(function, element):function on element.
adj = sp.coo_matrix((edges_unordered[:, 2], (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
#del idx,idx_map,edges_unordered
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
#adj = (adj + sp.eye(adj.shape[0])) #diagonal, set to 1
return adj
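# Illustrative call pattern for build_adj_weight (added sketch, not part of the
# original script). The STRING edge file path is hard-coded inside the function
# and must exist; the gene symbols below are only placeholders.
def _demo_build_adj_weight():
    expr = pd.DataFrame(np.random.rand(4, 3),
                        index=['TSPAN12', 'TSHZ1', 'TP53', 'EGFR'])  # genes x cells
    adj = build_adj_weight(expr)  # symmetric, weighted gene-gene adjacency (scipy sparse)
    return adj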
def getAdjByBiogrid(idx_features, pathnet = '~/Google Drive/fasttext/cnn/TCGA_cnn/BIOGRID-ALL-3.5.169.tab2.txt'):
edges_unordered = pd.read_table(pathnet ,index_col=None, usecols = [7,8] )
edges_unordered = np.asarray(edges_unordered)
idx = []
for i in range(len(edges_unordered)):
if edges_unordered[i,0] in idx_features.index and edges_unordered[i,1] in idx_features.index:
idx.append(i)
edges_unordered = edges_unordered[idx]
del i,idx
# build graph
idx = np.array(idx_features.index)
idx_map = {j: i for i, j in enumerate(idx)}
# the key (names) in edges_unordered --> the index (which row) in matrix
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape) #map:map(function, element):function on element
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(idx_features.shape[0], idx_features.shape[0]),
dtype=np.float32)
del idx,idx_map,edges_unordered
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
# adj = adj + sp.eye(adj.shape[0])
# sp.save_npz(os.path.join(pathnet,'adjCancer18442.npz'), adj)
return adj
def removeZeroAdj(adj, gedata):
#feature size: genes * samples, numpy.darray
if adj[0,0] != 0:
#adj = adj - sp.eye(adj.shape[0])
adj.setdiag(0)
# adjdense = adj.todense()
indd = np.where(np.sum(adj, axis=1) != 0)[0]
adj = adj[indd, :][:, indd]
# adjdense = adjdense[indd,:]
# adjdense = adjdense[:, indd]
gedata = gedata[indd,:]
return adj, gedata
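# Illustrative usage of removeZeroAdj (added sketch, not part of the original
# script): drop genes whose adjacency row is all zeros so the graph and the
# expression matrix stay aligned.
def _demo_remove_zero_adj():
    adj = sp.csr_matrix(np.array([[0, 1, 0],
                                  [1, 0, 0],
                                  [0, 0, 0]], dtype=np.float32))
    gedata = np.random.rand(3, 5)  # genes x samples (toy data)
    adj2, gedata2 = removeZeroAdj(adj, gedata)
    # adj2 is 2x2 and gedata2 has 2 rows: the isolated third gene is removed
    return adj2, gedata2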
def load_cancer(concat, diseases ,path, net,num_gene):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format('cancer'))
'''
if tianyu:
gedataA = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/ge_"+diseaseA+".csv", index_col = 0)
gedataB = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/ge_"+diseaseB+".csv", index_col = 0)
cnvdataA = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/cnv_"+diseaseA+".csv", index_col = 0)
cnvdataB = pd.read_csv("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/cnv_"+diseaseB+".csv", index_col = 0)
else:
data = pd.read_csv("/users/peng/documents/tianyu/hw5ty/data10000.csv", index_col=0)
if 'T' in data.index:
print ("drop T")
data = data.drop('T')
data = data.T #samples*genes
data2 = data[ind]
data2 = data2.T #genes*samples
'''
gedata = pd.DataFrame()
cnvdata = pd.DataFrame()
labels = []
count = 0
pathgene = ("/Users/tianyu/Google Drive/fasttext/classification/TCGAcleandata/")
for disease in diseases:
tmp = pd.read_csv((pathgene + "/ge/ge_" + disease+ ".csv"), index_col = 0)
gedata = pd.concat([gedata,tmp],axis = 1)
# tmp = pd.read_csv(os.path.join(pathgene, "cnv/cnv_"+disease+".csv"),index_col = 0)
# cnvdata = pd.concat([cnvdata,tmp],axis = 1)
labels.append(np.repeat(count, tmp.shape[1]))
count += 1
labels = np.concatenate(labels)
# adj = getAdjByBiogrid(gedata, path, net)
adj = sp.load_npz(path + 'adjCancer18442.npz')
'''
gedata = pd.concat([gedataA, gedataB], axis = 1)
cnvdata = pd.concat([cnvdataA, cnvdataB], axis = 1)
labels = np.asarray([0,1,2])
labels = np.repeat(labels, [gedataA.shape[1], gedataB.shape[1]], axis=0)
'''
gedata, geneind = high_var_dfdata(gedata, num=num_gene, ind=1)
adj = adj[geneind,:][:,geneind]
adj, gedata = removeZeroAdj(adj, np.asarray(gedata))
adj = normalize(adj)
adj = adj.astype('float32')
labels = labels.astype('uint8')
return adj, gedata, labels
# In[]:
def load_cluster(filepath,num_gene):
data = | pd.read_csv(filepath+'/separateData/GeneLabel10000.csv',index_col = 0) | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
def load_hydroSystem(file_path):
'''
Processes and imports the Condition Monitoring of Hydraulic Systems dataset.
I/O:
file_path: a string with the directory that contains the sensor data files;
return: a NumPy array shaped ((number of instances, timestamps, features), label)
'''
# List the files containing the sensor data
load_names = os.listdir(file_path)
load_names.remove('description.txt')
load_names.remove('documentation.txt')
# Column indices used to upsample the lower-rate variables onto the highest sampling-rate grid
cols_1 = np.arange(0, 6000, 100)
cols_10 = np.arange(0, 6000, 10)
# Import the data contained in the ".txt" files
# Features
pressure = []
flow = []
temp = []
print('Loading the datasets:')
for name in tqdm(load_names):
if 'PS' in name and name != 'EPS1.txt':
ps = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
pressure.append(ps)
elif 'FS' in name:
aux = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
fs = pd.DataFrame(data=np.nan*np.ones((aux.shape[0], 6000)))
fs[cols_10] = aux.values
fs = fs.interpolate(axis='columns')
flow.append(fs)
elif 'TS' in name:
aux = pd.read_csv(f'{file_path}{name}', delimiter='\t', header=None)
t = pd.DataFrame(data=np.nan*np.ones((aux.shape[0], 6000)))
t[cols_1] = aux.values
t = t.interpolate(axis='columns')
temp.append(t)
eps = pd.read_csv(f'{file_path}EPS1.txt', delimiter='\t', header=None)
vs = | pd.read_csv(f'{file_path}VS1.txt', delimiter='\t', header=None) | pandas.read_csv |
"""
>>> from blaze.expr import Symbol
>>> from blaze.compute.pandas import compute
>>> accounts = Symbol('accounts', 'var * {name: string, amount: int}')
>>> deadbeats = accounts[accounts['amount'] < 0]['name']
>>> from pandas import DataFrame
>>> data = [['Alice', 100], ['Bob', -50], ['Charlie', -20]]
>>> df = DataFrame(data, columns=['name', 'amount'])
>>> compute(deadbeats, df)
1 Bob
2 Charlie
Name: name, dtype: object
"""
from __future__ import absolute_import, division, print_function
import pandas as pd
from pandas.core.generic import NDFrame
from pandas import DataFrame, Series
from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
import numpy as np
from collections import defaultdict
from toolz import merge as merge_dicts
import fnmatch
from datashape.predicates import isscalar
from ..api.into import into
from ..dispatch import dispatch
from ..expr import (Projection, Field, Sort, Head, Broadcast, Selection,
Reduction, Distinct, Join, By, Summary, Label, ReLabel,
Map, Apply, Merge, Union, std, var, Like, Slice,
ElemWise, DateTime, Millisecond, Expr, Symbol)
from ..expr import UnaryOp, BinOp
from ..expr import Symbol, common_subexpression
from .core import compute, compute_up, base
from ..compatibility import _inttypes
__all__ = []
@dispatch(Projection, DataFrame)
def compute_up(t, df, **kwargs):
return df[list(t.fields)]
@dispatch(Field, (DataFrame, DataFrameGroupBy))
def compute_up(t, df, **kwargs):
return df[t.fields[0]]
@dispatch(Broadcast, DataFrame)
def compute_up(t, df, **kwargs):
d = dict((t._child[c]._expr, df[c]) for c in t._child.fields)
return compute(t._expr, d)
@dispatch(Broadcast, Series)
def compute_up(t, s, **kwargs):
return compute_up(t, s.to_frame(), **kwargs)
@dispatch(BinOp, Series, (Series, base))
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(BinOp, (Series, base), Series)
def compute_up(t, lhs, rhs, **kwargs):
return t.op(lhs, rhs)
@dispatch(UnaryOp, NDFrame)
def compute_up(t, df, **kwargs):
f = getattr(t, 'op', getattr(np, t.symbol, None))
if f is None:
raise ValueError('%s is not a valid operation on %s objects' %
(t.symbol, type(df).__name__))
return f(df)
@dispatch(Selection, (Series, DataFrame))
def compute_up(t, df, **kwargs):
predicate = compute(t.predicate, {t._child: df})
return df[predicate]
@dispatch(Symbol, DataFrame)
def compute_up(t, df, **kwargs):
if not list(t.fields) == list(df.names):
# TODO also check dtype
raise ValueError("Schema mismatch: \n\nTable:\n%s\n\nDataFrame:\n%s"
% (t, df))
return df
@dispatch(Join, DataFrame, DataFrame)
def compute_up(t, lhs, rhs, **kwargs):
""" Join two pandas data frames on arbitrary columns
The approach taken here could probably be improved.
To join on two columns we force each column to be the index of the
dataframe, perform the join, and then reset the index back to the left
side's original index.
"""
result = pd.merge(lhs, rhs,
left_on=t.on_left, right_on=t.on_right,
how=t.how)
return result.reset_index()[t.fields]
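# Illustrative plain-pandas sketch of the join performed above (added example,
# not part of blaze): merge on the requested columns, then keep the fields in the
# order the expression defines.
def _demo_pandas_join():
    lhs = DataFrame({'name': ['Alice', 'Bob'], 'amount': [100, 200]})
    rhs = DataFrame({'name': ['Alice', 'Bob'], 'city': ['NYC', 'LA']})
    result = pd.merge(lhs, rhs, left_on='name', right_on='name', how='inner')
    return result.reset_index(drop=True)[['name', 'amount', 'city']]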
@dispatch(Symbol, (DataFrameGroupBy, SeriesGroupBy))
def compute_up(t, gb, **kwargs):
return gb
def post_reduction(result):
# pandas may return an int, numpy scalar or non scalar here so we need to
# program defensively so that things are JSON serializable
try:
return result.item()
except (AttributeError, ValueError):
return result
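# Illustrative behaviour of post_reduction (added example, not part of blaze):
# numpy scalars expose .item() and are unwrapped to plain Python values, while
# Series/arrays raise inside .item() and are returned unchanged.
def _demo_post_reduction():
    assert post_reduction(np.int64(3)) == 3  # unwrapped to a plain int
    s = Series([1, 2, 3])
    assert post_reduction(s) is s            # non-scalars pass through untouched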
@dispatch(Reduction, (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = post_reduction(getattr(s, t.symbol)())
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch((std, var), (Series, SeriesGroupBy))
def compute_up(t, s, **kwargs):
result = post_reduction(getattr(s, t.symbol)(ddof=t.unbiased))
if t.keepdims:
result = Series([result], name=s.name)
return result
@dispatch(Distinct, DataFrame)
def compute_up(t, df, **kwargs):
return df.drop_duplicates()
@dispatch(Distinct, Series)
def compute_up(t, s, **kwargs):
s2 = Series(s.unique())
s2.name = s.name
return s2
def unpack(seq):
""" Unpack sequence of length one
>>> unpack([1, 2, 3])
[1, 2, 3]
>>> unpack([1])
1
"""
seq = list(seq)
if len(seq) == 1:
seq = seq[0]
return seq
Grouper = ElemWise, Series, list
@dispatch(By, list, DataFrame)
def get_grouper(c, grouper, df):
return grouper
@dispatch(By, (ElemWise, Series), NDFrame)
def get_grouper(c, grouper, df):
return compute(grouper, {c._child: df})
@dispatch(By, (Field, Projection), NDFrame)
def get_grouper(c, grouper, df):
return grouper.fields
@dispatch(By, Reduction, Grouper, NDFrame)
def compute_by(t, r, g, df):
names = [r._name]
preapply = compute(r._child, {t._child: df})
# Pandas and Blaze column naming schemes differ
# Coerce DataFrame column names to match Blaze's names
preapply = preapply.copy()
if isinstance(preapply, Series):
preapply.name = names[0]
else:
preapply.names = names
group_df = concat_nodup(df, preapply)
gb = group_df.groupby(g)
groups = gb[names[0] if isscalar(t.apply._child.dshape.measure) else names]
return compute_up(r, groups) # do reduction
@dispatch(By, Summary, Grouper, NDFrame)
def compute_by(t, s, g, df):
names = s.fields
preapply = DataFrame(dict(zip(names,
(compute(v._child, {t._child: df})
for v in s.values))))
df2 = concat_nodup(df, preapply)
groups = df2.groupby(g)
d = defaultdict(list)
for name, v in zip(names, s.values):
d[name].append(getattr(Series, v.symbol))
result = groups.agg(dict(d))
# Rearrange columns to match names order
result = result[sorted(result.columns, key=lambda t: names.index(t[0]))]
result.columns = t.apply.fields # flatten down multiindex
return result
@dispatch(Expr, DataFrame)
def post_compute_by(t, df):
return df.reset_index(drop=True)
@dispatch((Summary, Reduction), DataFrame)
def post_compute_by(t, df):
return df.reset_index()
@dispatch(By, NDFrame)
def compute_up(t, df, **kwargs):
grouper = get_grouper(t, t.grouper, df)
result = compute_by(t, t.apply, grouper, df)
return post_compute_by(t.apply, into(DataFrame, result))
def concat_nodup(a, b):
""" Concatenate two dataframes/series without duplicately named columns
>>> df = DataFrame([[1, 'Alice', 100],
... [2, 'Bob', -200],
... [3, 'Charlie', 300]],
... columns=['id','name', 'amount'])
>>> concat_nodup(df, df)
id name amount
0 1 Alice 100
1 2 Bob -200
2 3 Charlie 300
>>> concat_nodup(df.name, df.amount)
name amount
0 Alice 100
1 Bob -200
2 Charlie 300
>>> concat_nodup(df, df.amount + df.id)
id name amount 0
0 1 Alice 100 101
1 2 Bob -200 -198
2 3 Charlie 300 303
"""
if isinstance(a, DataFrame) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c not in a.columns]]],
axis=1)
if isinstance(a, DataFrame) and isinstance(b, Series):
if b.name not in a.columns:
return pd.concat([a, b], axis=1)
else:
return a
if isinstance(a, Series) and isinstance(b, DataFrame):
return pd.concat([a, b[[c for c in b.columns if c != a.name]]], axis=1)
if isinstance(a, Series) and isinstance(b, Series):
if a.name == b.name:
return a
else:
return pd.concat([a, b], axis=1)
@dispatch(Sort, DataFrame)
def compute_up(t, df, **kwargs):
return df.sort(t.key, ascending=t.ascending)
@dispatch(Sort, Series)
def compute_up(t, s, **kwargs):
return s.order(ascending=t.ascending)
@dispatch(Head, (Series, DataFrame))
def compute_up(t, df, **kwargs):
return df.head(t.n)
@dispatch(Label, DataFrame)
def compute_up(t, df, **kwargs):
return | DataFrame(df, columns=[t.label]) | pandas.DataFrame |
"""
Twitter data sourcing
"""
from twython import TwythonStreamer
from twython import Twython
import csv
import pandas as pd
import time
credentials = {}
credentials['CONSUMER_KEY'] = "<PASTE_YOUR_API_KEY>"
credentials['CONSUMER_SECRET'] = "<PASTE_YOUR_API_SECRET>"
tweets = Twython(credentials['CONSUMER_KEY'], credentials['CONSUMER_SECRET'])
iterations = 0
count2 = 93
dict_ = {'user': [], 'date': [], 'text': [], 'favorite_count': [], 'location': []}
# df = pd.DataFrame(dict_)
while iterations <= 12000:
query = {'q': '#AI OR #ArtificialIntelligence OR #MachineLearning OR #DeepLearning OR #SelfDrivingCar OR #ReinforcementLearning OR #DeepMind OR #Google OR AI OR Artificial Intelligence OR Machine Learning OR Deep Learning OR Self Driving Car OR Reinforcement Learning OR Elon Musk OR Andrew NG OR DeepMind OR Google', \
'count': 1000, 'lang': 'en'}
for status in tweets.search(**query)['statuses']:
if status['user']['location'] != '':
dict_['user'].append(status['user']['screen_name'])
dict_['date'].append(status['created_at'])
dict_['text'].append(status['text'])
dict_['favorite_count'].append(status['favorite_count'])
dict_['location'].append(status['user']['location'])
iterations += 1
if iterations % 300 == 0:
df = | pd.DataFrame(dict_) | pandas.DataFrame |
import random
import unittest
import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, to_timestamp, to_date
from pyspark.sql.types import StringType, StructField, StructType, IntegerType, FloatType
from haychecker.dhc.metrics import timeliness
replace_empty_with_null = udf(lambda x: None if x == "" else x, StringType())
replace_0_with_null = udf(lambda x: None if x == 0 else x, IntegerType())
replace_0dot_with_null = udf(lambda x: None if x == 0. else x, FloatType())
replace_every_string_with_null = udf(lambda x: None, StringType())
replace_every_int_with_null = udf(lambda x: None, IntegerType())
replace_every_float_with_null = udf(lambda x: None, FloatType())
class TestTimeliness(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestTimeliness, self).__init__(*args, **kwargs)
self.spark = SparkSession.builder.master("local[2]").appName("timeliness_test").getOrCreate()
self.spark.sparkContext.setLogLevel("ERROR")
def test_empty(self):
data = pd.DataFrame()
data["c1"] = []
data["c2"] = []
schema = [StructField("c1", StringType(), True), StructField("c2", StringType(), True)]
df = self.spark.createDataFrame(data, StructType(schema))
r1, r2 = timeliness(["c1", "c2"], dateFormat="dd:MM:yyyy", df=df, value="10:22:1980")
self.assertEqual(r1, 100.)
self.assertEqual(r2, 100.)
def test_allnull(self):
data = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import operator
from collections import OrderedDict
from datetime import datetime
from itertools import chain
import warnings
import numpy as np
from pandas import (notna, DataFrame, Series, MultiIndex, date_range,
Timestamp, compat)
import pandas as pd
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.core.apply import frame_apply
from pandas.util.testing import (assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.conftest import _get_cython_table_params
from pandas.tests.frame.common import TestData
class TestDataFrameApply(TestData):
def test_apply(self):
with np.errstate(all='ignore'):
# ufunc
applied = self.frame.apply(np.sqrt)
tm.assert_series_equal(np.sqrt(self.frame['A']), applied['A'])
# aggregator
applied = self.frame.apply(np.mean)
assert applied['A'] == np.mean(self.frame['A'])
d = self.frame.index[0]
applied = self.frame.apply(np.mean, axis=1)
assert applied[d] == np.mean(self.frame.xs(d))
assert applied.index is self.frame.index # want this
# invalid axis
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
pytest.raises(ValueError, df.apply, lambda x: x, 2)
# see gh-9573
df = DataFrame({'c0': ['A', 'A', 'B', 'B'],
'c1': ['C', 'C', 'D', 'D']})
df = df.apply(lambda ts: ts.astype('category'))
assert df.shape == (4, 2)
assert isinstance(df['c0'].dtype, CategoricalDtype)
assert isinstance(df['c1'].dtype, CategoricalDtype)
def test_apply_mixed_datetimelike(self):
# mixed datetimelike
# GH 7778
df = DataFrame({'A': date_range('20130101', periods=3),
'B': pd.to_timedelta(np.arange(3), unit='s')})
result = df.apply(lambda x: x, axis=1)
assert_frame_equal(result, df)
def test_apply_empty(self):
# empty
applied = self.empty.apply(np.sqrt)
assert applied.empty
applied = self.empty.apply(np.mean)
assert applied.empty
no_rows = self.frame[:0]
result = no_rows.apply(lambda x: x.mean())
expected = Series(np.nan, index=self.frame.columns)
assert_series_equal(result, expected)
no_cols = self.frame.loc[:, []]
result = no_cols.apply(lambda x: x.mean(), axis=1)
expected = Series(np.nan, index=self.frame.index)
assert_series_equal(result, expected)
# 2476
xp = DataFrame(index=['a'])
rs = xp.apply(lambda x: x['a'], axis=1)
assert_frame_equal(xp, rs)
def test_apply_with_reduce_empty(self):
# reduce with an empty DataFrame
x = []
result = self.empty.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, self.empty)
result = self.empty.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
empty_with_cols = DataFrame(columns=['a', 'b', 'c'])
result = empty_with_cols.apply(x.append, axis=1, result_type='expand')
assert_frame_equal(result, empty_with_cols)
result = empty_with_cols.apply(x.append, axis=1, result_type='reduce')
assert_series_equal(result, Series(
[], index=pd.Index([], dtype=object)))
# Ensure that x.append hasn't been called
assert x == []
def test_apply_deprecate_reduce(self):
with warnings.catch_warnings(record=True):
x = []
self.empty.apply(x.append, axis=1, result_type='reduce')
def test_apply_standard_nonunique(self):
df = DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
rs = df.apply(lambda s: s[0], axis=1)
xp = Series([1, 4, 7], ['a', 'a', 'c'])
assert_series_equal(rs, xp)
rs = df.T.apply(lambda s: s[0], axis=0)
assert_series_equal(rs, xp)
def test_with_string_args(self):
for arg in ['sum', 'mean', 'min', 'max', 'std']:
result = self.frame.apply(arg)
expected = getattr(self.frame, arg)()
tm.assert_series_equal(result, expected)
result = self.frame.apply(arg, axis=1)
expected = getattr(self.frame, arg)(axis=1)
tm.assert_series_equal(result, expected)
def test_apply_broadcast_deprecated(self):
with tm.assert_produces_warning(FutureWarning):
self.frame.apply(np.mean, broadcast=True)
def test_apply_broadcast(self):
# scalars
result = self.frame.apply(np.mean, result_type='broadcast')
expected = DataFrame([self.frame.mean()], index=self.frame.index)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(np.mean, axis=1, result_type='broadcast')
m = self.frame.mean(axis=1)
expected = DataFrame({c: m for c in self.frame.columns})
tm.assert_frame_equal(result, expected)
# lists
result = self.frame.apply(
lambda x: list(range(len(self.frame.columns))),
axis=1,
result_type='broadcast')
m = list(range(len(self.frame.columns)))
expected = DataFrame([m] * len(self.frame.index),
dtype='float64',
index=self.frame.index,
columns=self.frame.columns)
tm.assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: list(range(len(self.frame.index))),
result_type='broadcast')
m = list(range(len(self.frame.index)))
expected = DataFrame({c: m for c in self.frame.columns},
dtype='float64',
index=self.frame.index)
tm.assert_frame_equal(result, expected)
# preserve columns
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: [1, 2, 3],
axis=1,
result_type='broadcast')
tm.assert_frame_equal(result, df)
df = DataFrame(np.tile(np.arange(3), 6).reshape(6, -1) + 1,
columns=list('ABC'))
result = df.apply(lambda x: Series([1, 2, 3], index=list('abc')),
axis=1,
result_type='broadcast')
expected = df.copy()
tm.assert_frame_equal(result, expected)
def test_apply_broadcast_error(self):
df = DataFrame(
np.tile(np.arange(3, dtype='int64'), 6).reshape(6, -1) + 1,
columns=['A', 'B', 'C'])
# > 1 ndim
with pytest.raises(ValueError):
df.apply(lambda x: np.array([1, 2]).reshape(-1, 2),
axis=1,
result_type='broadcast')
# cannot broadcast
with pytest.raises(ValueError):
df.apply(lambda x: [1, 2],
axis=1,
result_type='broadcast')
with pytest.raises(ValueError):
df.apply(lambda x: Series([1, 2]),
axis=1,
result_type='broadcast')
def test_apply_raw(self):
result0 = self.frame.apply(np.mean, raw=True)
result1 = self.frame.apply(np.mean, axis=1, raw=True)
expected0 = self.frame.apply(lambda x: x.values.mean())
expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)
assert_series_equal(result0, expected0)
assert_series_equal(result1, expected1)
# no reduction
result = self.frame.apply(lambda x: x * 2, raw=True)
expected = self.frame * 2
assert_frame_equal(result, expected)
def test_apply_axis1(self):
d = self.frame.index[0]
tapplied = self.frame.apply(np.mean, axis=1)
assert tapplied[d] == np.mean(self.frame.xs(d))
def test_apply_ignore_failures(self):
result = frame_apply(self.mixed_frame,
np.mean, 0,
ignore_failures=True).apply_standard()
expected = self.mixed_frame._get_numeric_data().apply(np.mean)
assert_series_equal(result, expected)
def test_apply_mixed_dtype_corner(self):
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df[:0].apply(np.mean, axis=1)
# the result here is actually kind of ambiguous, should it be a Series
# or a DataFrame?
expected = Series(np.nan, index=pd.Index([], dtype='int64'))
assert_series_equal(result, expected)
df = DataFrame({'A': ['foo'],
'B': [1.]})
result = df.apply(lambda x: x['A'], axis=1)
expected = Series(['foo'], index=[0])
assert_series_equal(result, expected)
result = df.apply(lambda x: x['B'], axis=1)
expected = Series([1.], index=[0])
assert_series_equal(result, expected)
def test_apply_empty_infer_type(self):
no_cols = DataFrame(index=['a', 'b', 'c'])
no_index = DataFrame(columns=['a', 'b', 'c'])
def _check(df, f):
with warnings.catch_warnings(record=True):
test_res = f(np.array([], dtype='f8'))
is_reduction = not isinstance(test_res, np.ndarray)
def _checkit(axis=0, raw=False):
res = df.apply(f, axis=axis, raw=raw)
if is_reduction:
agg_axis = df._get_agg_axis(axis)
assert isinstance(res, Series)
assert res.index is agg_axis
else:
assert isinstance(res, DataFrame)
_checkit()
_checkit(axis=1)
_checkit(raw=True)
_checkit(axis=0, raw=True)
with np.errstate(all='ignore'):
_check(no_cols, lambda x: x)
_check(no_cols, lambda x: x.mean())
_check(no_index, lambda x: x)
_check(no_index, lambda x: x.mean())
result = no_cols.apply(lambda x: x.mean(), result_type='broadcast')
assert isinstance(result, DataFrame)
def test_apply_with_args_kwds(self):
def add_some(x, howmuch=0):
return x + howmuch
def agg_and_add(x, howmuch=0):
return x.mean() + howmuch
def subtract_and_divide(x, sub, divide=1):
return (x - sub) / divide
result = self.frame.apply(add_some, howmuch=2)
exp = self.frame.apply(lambda x: x + 2)
assert_frame_equal(result, exp)
result = self.frame.apply(agg_and_add, howmuch=2)
exp = self.frame.apply(lambda x: x.mean() + 2)
assert_series_equal(result, exp)
res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)
exp = self.frame.apply(lambda x: (x - 2.) / 2.)
assert_frame_equal(res, exp)
def test_apply_yield_list(self):
result = self.frame.apply(list)
assert_frame_equal(result, self.frame)
def test_apply_reduce_Series(self):
self.frame.loc[::2, 'A'] = np.nan
expected = self.frame.mean(1)
result = self.frame.apply(np.mean, axis=1)
assert_series_equal(result, expected)
def test_apply_differently_indexed(self):
df = DataFrame(np.random.randn(20, 10))
result0 = df.apply(Series.describe, axis=0)
expected0 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df)),
columns=df.columns)
assert_frame_equal(result0, expected0)
result1 = df.apply(Series.describe, axis=1)
expected1 = DataFrame(dict((i, v.describe())
for i, v in compat.iteritems(df.T)),
columns=df.index).T
assert_frame_equal(result1, expected1)
def test_apply_modify_traceback(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
data.loc[4, 'C'] = np.nan
def transform(row):
if row['C'].startswith('shin') and row['A'] == 'foo':
row['D'] = 7
return row
def transform2(row):
if (notna(row['C']) and row['C'].startswith('shin') and
row['A'] == 'foo'):
row['D'] = 7
return row
try:
data.apply(transform, axis=1)
except AttributeError as e:
assert len(e.args) == 2
assert e.args[1] == 'occurred at index 4'
assert e.args[0] == "'float' object has no attribute 'startswith'"
def test_apply_bug(self):
# GH 6125
positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],
[1, 'DEF0', 20], [2, 'ABC1', 50],
[2, 'YUM1', 20], [2, 'DEF1', 20]],
columns=['a', 'market', 'position'])
def f(r):
return r['market']
expected = positions.apply(f, axis=1)
positions = DataFrame([[datetime(2013, 1, 1), 'ABC0', 50],
[datetime(2013, 1, 2), 'YUM0', 20],
[datetime(2013, 1, 3), 'DEF0', 20],
[datetime(2013, 1, 4), 'ABC1', 50],
[datetime(2013, 1, 5), 'YUM1', 20],
[datetime(2013, 1, 6), 'DEF1', 20]],
columns=['a', 'market', 'position'])
result = positions.apply(f, axis=1)
assert_series_equal(result, expected)
def test_apply_convert_objects(self):
data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',
'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two',
'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull',
'dull', 'shiny', 'shiny', 'dull',
'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
result = data.apply(lambda x: x, axis=1)
assert_frame_equal(result._convert(datetime=True), data)
def test_apply_attach_name(self):
result = self.frame.apply(lambda x: x.name)
expected = Series(self.frame.columns, index=self.frame.columns)
assert_series_equal(result, expected)
result = self.frame.apply(lambda x: x.name, axis=1)
expected = Series(self.frame.index, index=self.frame.index)
assert_series_equal(result, expected)
# non-reductions
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))
expected = DataFrame(np.tile(self.frame.columns,
(len(self.frame.index), 1)),
index=self.frame.index,
columns=self.frame.columns)
assert_frame_equal(result, expected)
result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),
axis=1)
expected = Series(np.repeat(t[0], len(self.frame.columns))
for t in self.frame.itertuples())
expected.index = self.frame.index
assert_series_equal(result, expected)
def test_apply_multi_index(self):
index = MultiIndex.from_arrays([['a', 'a', 'b'], ['c', 'd', 'd']])
s = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['col1', 'col2'])
result = s.apply(
lambda x: Series({'min': min(x), 'max': max(x)}), 1)
expected = DataFrame([[1, 2], [3, 4], [5, 6]],
index=index,
columns=['min', 'max'])
assert_frame_equal(result, expected, check_like=True)
def test_apply_dict(self):
# GH 8735
A = DataFrame([['foo', 'bar'], ['spam', 'eggs']])
A_dicts = Series([dict([(0, 'foo'), (1, 'spam')]),
dict([(0, 'bar'), (1, 'eggs')])])
B = DataFrame([[0, 1], [2, 3]])
B_dicts = Series([dict([(0, 0), (1, 2)]), dict([(0, 1), (1, 3)])])
fn = lambda x: x.to_dict()
for df, dicts in [(A, A_dicts), (B, B_dicts)]:
reduce_true = df.apply(fn, result_type='reduce')
reduce_false = df.apply(fn, result_type='expand')
reduce_none = df.apply(fn)
assert_series_equal(reduce_true, dicts)
assert_frame_equal(reduce_false, df)
assert_series_equal(reduce_none, dicts)
def test_applymap(self):
applied = self.frame.applymap(lambda x: x * 2)
tm.assert_frame_equal(applied, self.frame * 2)
self.frame.applymap(type)
# gh-465: function returning tuples
result = self.frame.applymap(lambda x: (x, x))
assert isinstance(result['A'][0], tuple)
# gh-2909: object conversion to float in constructor?
df = DataFrame(data=[1, 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
df = DataFrame(data=[1., 'a'])
result = df.applymap(lambda x: x)
assert result.dtypes[0] == object
# see gh-2786
df = DataFrame(np.random.random((3, 4)))
df2 = df.copy()
cols = ['a', 'a', 'a', 'a']
df.columns = cols
expected = df2.applymap(str)
expected.columns = cols
result = df.applymap(str)
tm.assert_frame_equal(result, expected)
# datetime/timedelta
df['datetime'] = Timestamp('20130101')
df['timedelta'] = pd.Timedelta('1 min')
result = df.applymap(str)
for f in ['datetime', 'timedelta']:
assert result.loc[0, f] == str(df.loc[0, f])
# see gh-8222
empty_frames = [pd.DataFrame(),
pd.DataFrame(columns=list('ABC')),
pd.DataFrame(index=list('ABC')),
pd.DataFrame({'A': [], 'B': [], 'C': []})]
for frame in empty_frames:
for func in [round, lambda x: x]:
result = frame.applymap(func)
tm.assert_frame_equal(result, frame)
def test_applymap_box_timestamps(self):
# #2689, #2627
ser = pd.Series(date_range('1/1/2000', periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
pd.DataFrame(ser).applymap(func)
def test_applymap_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
df = pd.DataFrame({'a': [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02')],
'b': [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')],
'c': [pd.Timedelta('1 days'),
pd.Timedelta('2 days')],
'd': [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]})
res = df.applymap(lambda x: '{0}'.format(x.__class__.__name__))
exp = pd.DataFrame({'a': ['Timestamp', 'Timestamp'],
'b': ['Timestamp', 'Timestamp'],
'c': ['Timedelta', 'Timedelta'],
'd': ['Period', 'Period']})
tm.assert_frame_equal(res, exp)
def test_frame_apply_dont_convert_datetime64(self):
from pandas.tseries.offsets import BDay
df = DataFrame({'x1': [datetime(1996, 1, 1)]})
df = df.applymap(lambda x: x + BDay())
df = df.applymap(lambda x: x + BDay())
assert df.x1.dtype == 'M8[ns]'
def test_apply_non_numpy_dtype(self):
# See gh-12244
df = DataFrame({'dt': pd.date_range(
"2015-01-01", periods=3, tz='Europe/Brussels')})
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
result = df.apply(lambda x: x + pd.Timedelta('1day'))
expected = DataFrame({'dt': pd.date_range(
"2015-01-02", periods=3, tz='Europe/Brussels')})
assert_frame_equal(result, expected)
df = DataFrame({'dt': ['a', 'b', 'c', 'a']}, dtype='category')
result = df.apply(lambda x: x)
assert_frame_equal(result, df)
def test_apply_dup_names_multi_agg(self):
# GH 21063
df = pd.DataFrame([[0, 1], [2, 3]], columns=['a', 'a'])
expected = pd.DataFrame([[0, 1]], columns=['a', 'a'], index=['min'])
result = df.agg(['min'])
tm.assert_frame_equal(result, expected)
class TestInferOutputShape(object):
# the user has supplied an opaque UDF where
# they are transforming the input that requires
# us to infer the output
def test_infer_row_shape(self):
# gh-17437
# if row shape is changing, infer it
df = pd.DataFrame(np.random.rand(10, 2))
result = df.apply(np.fft.fft, axis=0)
assert result.shape == (10, 2)
result = df.apply(np.fft.rfft, axis=0)
assert result.shape == (6, 2)
def test_with_dictlike_columns(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
expected = Series([{'s': 3} for t in df.itertuples()])
assert_series_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1)
assert_series_equal(result, expected)
# compose a series
result = (df['a'] + df['b']).apply(lambda x: {'s': x})
expected = Series([{'s': 3}, {'s': 3}])
assert_series_equal(result, expected)
# gh-18775
df = DataFrame()
df["author"] = ["X", "Y", "Z"]
df["publisher"] = ["BBC", "NBC", "N24"]
df["date"] = pd.to_datetime(['17-10-2010 07:15:30',
'13-05-2011 08:20:35',
'15-01-2013 09:09:09'])
result = df.apply(lambda x: {}, axis=1)
expected = Series([{}, {}, {}])
assert_series_equal(result, expected)
def test_with_dictlike_columns_with_infer(self):
# gh 17602
df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
expected = DataFrame({'s': [3, 3]})
assert_frame_equal(result, expected)
df['tm'] = [pd.Timestamp('2017-05-01 00:00:00'),
pd.Timestamp('2017-05-02 00:00:00')]
result = df.apply(lambda x: {'s': x['a'] + x['b']},
axis=1, result_type='expand')
assert_frame_equal(result, expected)
def test_with_listlike_columns(self):
# gh-17348
df = DataFrame({'a': Series(np.random.randn(4)),
'b': ['a', 'list', 'of', 'words'],
'ts': date_range('2016-10-01', periods=4, freq='H')})
result = df[['a', 'b']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'b']].itertuples()])
assert_series_equal(result, expected)
result = df[['a', 'ts']].apply(tuple, axis=1)
expected = Series([t[1:] for t in df[['a', 'ts']].itertuples()])
assert_series_equal(result, expected)
# gh-18919
df = DataFrame({'x': Series([['a', 'b'], ['q']]),
'y': Series([['z'], ['q', 't']])})
df.index = MultiIndex.from_tuples([('i0', 'j0'), ('i1', 'j1')])
result = df.apply(
lambda row: [el for el in row['x'] if el in row['y']],
axis=1)
expected = Series([[], ['q']], index=df.index)
assert_series_equal(result, expected)
def test_infer_output_shape_columns(self):
# gh-18573
df = DataFrame({'number': [1., 2.],
'string': ['foo', 'bar'],
'datetime': [pd.Timestamp('2017-11-29 03:30:00'),
pd.Timestamp('2017-11-29 03:45:00')]})
result = df.apply(lambda row: (row.number, row.string), axis=1)
expected = Series([(t.number, t.string) for t in df.itertuples()])
assert_series_equal(result, expected)
def test_infer_output_shape_listlike_columns(self):
# gh-16353
df = DataFrame(np.random.randn(6, 3), columns=['A', 'B', 'C'])
result = df.apply(lambda x: [1, 2, 3], axis=1)
expected = Series([[1, 2, 3] for t in df.itertuples()])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import pandas as pd
import numpy as np
import statsmodels.formula.api as smf
# open df get list for new df rows
dyn = | pd.read_csv(snakemake.input.dyn_csv, index_col=0) | pandas.read_csv |
import requests
import warnings
import threading
import numpy as np
import pandas as pd
import datetime as dt
warnings.simplefilter(action='ignore', category=DeprecationWarning)
results = []
class Death():
def __init__(self, timestamp, player_name, inventory):
self.player_name = player_name
self.timestamp = timestamp
self.inventory = inventory
def get_info(self):
return self.timestamp, self.player_name, self.inventory
class SuspectedRat():
def __init__(self, player_name, timeout=5):
self.threads = []
self.timeframe = []
self.player_name = player_name
self.player_id = ''
self.player_death_ids = []
self.player_deaths = []
self.timeout = timeout
def item_id_to_item_name(self, item_id):
try:
item_name = requests.get(url=f'https://gameinfo.albiononline.com/api/gameinfo/items/{item_id}/data', timeout=self.timeout).json()['localizedNames']['EN-US']
return item_name
except Exception:
pass
def get_player_id(self):
try:
self.player_id = requests.get(url=f'https://gameinfo.albiononline.com/api/gameinfo/search?q={self.player_name}', timeout=self.timeout).json()['players'][0]['Id']
except Exception:
pass
def get_player_deaths(self):
try:
player_deaths_temp = requests.get(url=f'https://gameinfo.albiononline.com/api/gameinfo/players/{self.player_id}/deaths', timeout=self.timeout).json()
for death in player_deaths_temp:
self.player_death_ids.append(death['EventId'])
except Exception:
pass
def parse_player_death(self, death_id):
try:
inventory = []
player_death = requests.get(url=f'https://gameinfo.albiononline.com/api/gameinfo/events/{death_id}', timeout=self.timeout).json()
inventory_temp = player_death['Victim']['Inventory']
timestamp = player_death['TimeStamp']
for item in inventory_temp:
if item:
if '@' in item['Type']:
inventory.append([self.item_id_to_item_name(item['Type']), int(item['Type'].split('@')[1]), int(item['Count'])])
else:
inventory.append([self.item_id_to_item_name(item['Type']), 0, int(item['Count'])])
if inventory:
self.player_deaths.append(Death(timestamp.split('Z')[0], self.player_name, inventory))
except Exception:
pass
def parse_player_deaths(self):
self.get_player_id()
self.get_player_deaths()
for death_id in self.player_death_ids:
self.threads.append(threading.Thread(target=self.parse_player_death, args=(death_id,)))
for thread in self.threads:
thread.start()
for thread in self.threads:
thread.join()
def player_deaths_to_df(self, index):
global results
player_deaths_list = []
self.parse_player_deaths()
for death in self.player_deaths:
info = death.get_info()
for item in info[2]:
player_deaths_list.append([pd.to_datetime(info[0]), info[1], item[0], item[1], item[2]])
player_deaths_df = | pd.DataFrame(player_deaths_list, columns=['Date', 'Player Name', 'Item Name', 'Enchantment', 'Amount']) | pandas.DataFrame |
import numpy as np
import matplotlib.pyplot as plt
from numpy import array,identity,diagonal
import os
import numpy
import pandas as pd
import sys
import random
import math
#from scipy.linalg import svd
from math import sqrt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from random import randrange
import operator
from sklearn.metrics import f1_score
from sklearn.decomposition import PCA
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.preprocessing import Imputer
import re, string, unicodedata
import nltk
import warnings
from itertools import combinations
from sklearn.metrics.pairwise import pairwise_distances
from nltk.corpus import stopwords
import bayes as b1
import nn as nn1
import projections as pro1
import lsh as lsh1
warnings.filterwarnings("ignore")
np.set_printoptions(threshold=np.nan)
prior={}
dic1={}
dic2={}
testset=[]
trainset=[]
trainlabel=[]
testlabel=[]
prior={}
train=[]
labels=[]
def F1_score_1(testlabel, predictions):
    # Accumulate the binary confusion counts over all samples (treating label 1
    # as the positive class), then compute precision, recall and F1 once.
    false_negative = false_positive = true_negative = true_positive = 0
    for i in range(len(testlabel)):
        if testlabel[i] != predictions[i]:
            if predictions[i] == 0:
                false_negative += 1
            else:
                false_positive += 1
        else:
            if predictions[i] == 0:
                true_negative += 1
            else:
                true_positive += 1
    precision = true_positive / (true_positive + false_positive) if (true_positive + false_positive) else 0
    recall = true_positive / (true_positive + false_negative) if (true_positive + false_negative) else 0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0
    return precision, recall, f1
def F1_score(testlabel,predictions):
return ((f1_score(testlabel, predictions, average='macro')),(f1_score(testlabel, predictions, average='micro')))
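# Illustrative usage of the F1 helpers (added example, not part of the original
# script): macro averages the per-class F1 scores, micro aggregates the counts.
def _demo_f1_score():
    y_true = [0, 0, 1, 1, 2]
    y_pred = [0, 1, 1, 1, 2]
    macro, micro = F1_score(y_true, y_pred)
    return macro, micro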
def cross_validation_k(train,labels,k):
k=10
#global train,labels
classes={}
index=0
for labelinst in labels:
#print(labelinst)
if labelinst[0] in classes:
classes[labelinst[0]].add(index)
else:
classes[labelinst[0]] = {index}
index=index+1
fold_classes_list={}
for label in classes:
l=len(list(classes[label]))
dataset_copy=list(classes[label])
dataset_split = list()
fold_size = (int(l / k))
for i in range(k):
fold = list()
while len(fold) < fold_size:
index = randrange(len(dataset_copy))
fold.append(dataset_copy.pop(index))
dataset_split.append(fold)
#print(dataset_split)
fold_classes_list[label]=dataset_split
#print(fold_classes_list[0])
list_k_fold=[0 for i in range(k)]
list_k_fold1=[0 for i in range(k)]
for i in range(k):
list_small=[]
for label in fold_classes_list:
list_small.extend(fold_classes_list[label][i])
list_k_fold[i]=list(list_small)
#print(list_k_fold)
return list_k_fold
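# Illustrative usage of cross_validation_k (added example, not part of the
# original script): labels are expected as rows (e.g. one-element lists), and the
# function returns index folds that are roughly stratified per class. Note that
# the current implementation overrides the k argument with 10.
def _demo_cross_validation():
    X = np.random.rand(40, 5)               # toy feature matrix
    y = [[i % 2] for i in range(40)]        # two balanced classes
    folds = cross_validation_k(X, y, k=10)  # list of 10 index folds
    return folds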
def testing_dolphin_pubmed(testfile,labelfile):
print("FOR DOLPHIN DATA SET: ")
#f2.write(str("DOLPHIN DATA SET"))
train = | pd.read_csv(testfile,delimiter=' ',header=None) | pandas.read_csv |
import os
import sys
import numpy as np
import pandas as pd
from scipy import sparse
from tensorflow import keras
sys.path.append("libs")
from utils import sigmoid
class ContentVaeDataGenerator(keras.utils.Sequence):
'''
Generate the training and validation data
for the content part of vbae model.
'''
def __init__(self,
data_root,
phase,
batch_size,
batch_num=None,
prev_layers=[],
noise_type=None,
joint=False,
shuffle=True):
assert phase in ["train", "val", "test"], "Phase must be [train, val, test]"
if phase == "train":
self.user_ids = np.array(pd.unique(pd.read_csv(
os.path.join(data_root, "train.csv"))["uid"]), dtype=np.int32)
else:
self.user_ids = np.array(pd.unique(pd.read_csv(
os.path.join(data_root, "{}_obs.csv".format(phase)))["uid"]), dtype=np.int32)
feature_path = os.path.join(data_root, "features.npz")
self.features = sparse.load_npz(feature_path)[self.user_ids-1]
self.num_users = self.features.shape[0]
self.batch_size = batch_size
self.batch_num = batch_num
if prev_layers != []:
self.apply_prev_layers(self.features, prev_layers)
        ### Whether to add noise to the inputs, and which type of noise.
        self.noise_type = noise_type
        ### Shuffle the users if necessary.
self.indexes = np.arange(self.num_users)
self.shuffle = shuffle
if self.shuffle:
self.on_epoch_end()
### Train jointly with the collaborative part
self.joint = joint
def __len__(self):
'''
The total number of batches.
'''
if self.batch_num is None:
batch_num = self.num_users//self.batch_size
if self.num_users%self.batch_size != 0:
batch_num+=1
else:
batch_num = self.batch_num
return batch_num
def __getitem__(self, i):
'''
Return the batch indexed by i.
'''
batch_idxes = self.indexes[i*self.batch_size:(i+1)*self.batch_size]
batch_target = self.features[batch_idxes].toarray()
if self.noise_type is None:
batch_input = batch_target
else:
batch_input = self.add_noise(self.noise_type, batch_target)
if self.joint:
batch_input = [batch_input, self.z_b[batch_idxes], self.d[batch_idxes]]
batch_target = [batch_target, self.ratings[batch_idxes].toarray()]
return batch_input, batch_target
def apply_prev_layers(self, features, prev_layers):
'''
Apply the previous pretrained layers on the feature
'''
batch_num = self.__len__()
ori_features = features.toarray()
for prev_layer in prev_layers:
new_dims = prev_layer.outputs[0].shape.as_list()[-1]
new_features = np.zeros((self.num_users, new_dims), dtype=np.float32)
for i in range(batch_num):
new_features[i*self.batch_size:(i+1)*self.batch_size] = prev_layer(
ori_features[i*self.batch_size:(i+1)*self.batch_size]
)
ori_features = new_features
self.features = sparse.csr_matrix(new_features)
def on_epoch_end(self):
'''
        Shuffle the user indexes after each epoch.
'''
if self.shuffle:
np.random.shuffle(self.indexes)
def add_noise(self, noise_type, contents):
'''
corrupt the inputs and train as SDAE style.
'''
if 'Mask' in noise_type:
frac = float(noise_type.split('-')[1])
masked_contents = np.copy(contents)
for item in masked_contents:
zero_pos = np.random.choice(len(item), int(round(
frac*len(item))), replace=False)
item[zero_pos] = 0
return masked_contents
else:
raise NotImplementedError
def set_ratings(self, ratings):
self.ratings = ratings
def update_previous_bstep(self, z_b, logits, d):
self.z_b, self.alpha, self.d = z_b, sigmoid(logits), d
@property
def feature_dim(self):
return self.features.shape[-1]
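# Hedged usage sketch (illustrative only): the data_root layout ("train.csv", "features.npz")
# follows the loader above, but the concrete path, batch size and mask fraction are assumptions.
def _example_content_generator():
    gen = ContentVaeDataGenerator(data_root="data/citeulike", phase="train",
                                  batch_size=256, noise_type="Mask-0.3")
    batch_input, batch_target = gen[0]
    return batch_input.shape, batch_target.shape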
class CollaborativeVAEDataGenerator(keras.utils.Sequence):
def __init__(self,
data_root,
phase,
batch_size,
reuse=True,
joint=True,
shuffle=True):
'''
Generate the training and validation data
for the collaborative part of vbae model.
'''
assert phase in ["train", "val", "test"], "Phase must be [train, val, test]"
self.phase = phase
self.batch_size = batch_size
self.data_root = data_root
self.__load_data(data_root, reuse=reuse)
self.shuffle = shuffle
if self.shuffle:
self.on_epoch_end()
### Train jointly with the content part
self.joint = joint
def __load_data(self, data_root, reuse):
### Load the dataset
meta_table = pd.read_csv(os.path.join(data_root, "meta.csv"))
self.num_items = meta_table["num_items"][0]
if self.phase == "train":
obs_path = os.path.join(data_root, "train.csv")
obs_records = pd.read_csv(obs_path)
obs_group = obs_records.groupby("uid")
unk_group = obs_group
else:
obs_path = os.path.join(data_root, "{}_obs.csv".format(self.phase))
unk_path = os.path.join(data_root, "{}_unk.csv".format(self.phase))
            obs_records = pd.read_csv(obs_path)
import numpy as np
import pandas as pd
from tqdm import tqdm
import datetime as dt
from collections import defaultdict
from dateutil.relativedelta import relativedelta
def collect_dates_for_cohort(df_pop, control_reservoir, control_dates, col_names=None):
'''
    Fill 'control_reservoir' with control candidates grouped by (age, sex), and fill
    'control_dates' with the dates of each person's main events (keyed by their CPF).
    Args:
        df_pop:
            pandas.DataFrame.
        control_reservoir:
            collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
None.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)",
"HOSPITALIZACAO COVID": "DATA HOSPITALIZACAO",
}
for j in tqdm(range(df_pop.shape[0])):
cpf = df_pop["CPF"].iat[j]
sex, age = df_pop["SEXO"].iat[j], df_pop["IDADE"].iat[j]
# Different outcomes' dates
dt_d1 = df_pop[col_names["D1"]].iat[j]
dt_d2 = df_pop[col_names["D2"]].iat[j]
dt_death = df_pop[col_names["OBITO COVID"]].iat[j]
dt_death_general = df_pop[col_names["OBITO GERAL"]].iat[j]
dt_hosp_covid = df_pop[col_names["HOSPITALIZACAO COVID"]].iat[j]
control_reservoir[(age,sex)].append(cpf)
if pd.notna(dt_d1):
control_dates["D1"][cpf] = dt_d1
if pd.notna(dt_d2):
control_dates["D2"][cpf] = dt_d2
if pd.notna(dt_death):
control_dates["DEATH COVID"][cpf] = dt_death
if pd.notna(dt_death_general):
control_dates["DEATH GENERAL"][cpf] = dt_death_general
if pd.notna(dt_hosp_covid):
control_dates["HOSPITALIZATION COVID"][cpf] = dt_hosp_covid
def rearrange_controls(control_reservoir, seed):
'''
Shuffle the order of the controls in the structure containing all
control candidates.
Args:
control_reservoir:
collections.defaultdict.
seed:
Integer.
Return:
None.
'''
np.random.seed(seed)
for key in control_reservoir.keys():
np.random.shuffle(control_reservoir[key])
def perform_matching(datelst, df_vac, control_reservoir, control_used, control_dates, col_names):
'''
    For each date in 'datelst', match every person vaccinated on that date to an
    unvaccinated control of the same age and sex drawn from 'control_reservoir'.
Args:
datelst:
List of datetime.date.
df_vac:
pandas.DataFrame.
control_reservoir:
collections.defaultdict.
control_used:
collections.defaultdict.
control_dates:
collections.defaultdict.
col_names:
dictionary.
Return:
pareados:
pandas.DataFrame.
matched:
dictionary.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
matchings = defaultdict(lambda:-1)
matched = defaultdict(lambda:False)
for current_date in tqdm(datelst):
# Select all people who was vaccinated at the current date
df_vac["compare_date"] = df_vac[col_names["D1"]].apply(lambda x: True if x==current_date else False)
current_vaccinated = df_vac[df_vac["compare_date"]==True]
cpf_list = current_vaccinated["CPF"].tolist()
age_list = current_vaccinated["IDADE"].tolist()
sex_list = current_vaccinated["SEXO"].tolist()
# For each person vaccinated at the current date, check if there is a control for he/she.
for j in range(0, len(cpf_list)):
pair = find_pair(current_date, age_list[j], sex_list[j], control_reservoir, control_used, control_dates)
if pair!=-1:
matchings[cpf_list[j]] = pair
items_matching = matchings.items()
pareados = pd.DataFrame({"CPF CASO": [ x[0] for x in items_matching ], "CPF CONTROLE": [ x[1] for x in items_matching ]})
for cpf in [ x[0] for x in items_matching ]+[ x[1] for x in items_matching ]:
matched[cpf]=True
return pareados, matched
def get_events(df_pop, pareados, matched, col_names):
'''
    Build a per-person event table (D1, D2, Covid death, general death) covering both
    matched and unmatched individuals.
Args:
df_pop:
pareados:
matched:
col_names:
Return:
datas:
pandas.DataFrame.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
data_obito = defaultdict(lambda:np.nan)
data_obito_geral = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
for j in range(df_pop.shape[0]):
cpf = df_pop["CPF"].iat[j]
d1_dt = df_pop[col_names["D1"]].iat[j]
d2_dt = df_pop[col_names["D2"]].iat[j]
obito = df_pop[col_names["OBITO COVID"]].iat[j]
obito_geral = df_pop[col_names["OBITO GERAL"]].iat[j]
#teste = df_pop["DATA SOLICITACAO(TESTES)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
elif not pd.isna(obito_geral):
data_obito_geral[cpf] = obito_geral
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not pd.isna(d2_dt):
data_d2[cpf] = d2_dt
# -- create cols with dates --
datas = {
"CPF": [], "DATA D1": [], "DATA D2": [],
"DATA OBITO COVID": [], "DATA OBITO GERAL": [],
"TIPO": [], "PAR": [], "PAREADO": []
}
print("Criando tabela de eventos ...")
for j in tqdm(range(0, pareados.shape[0])):
cpf_caso = pareados["CPF CASO"].iat[j]
cpf_control = pareados["CPF CONTROLE"].iat[j]
# Fill new columns
datas["CPF"] += [cpf_caso, cpf_control]
datas["DATA D1"] += [data_d1[cpf_caso], data_d1[cpf_control]]
datas["DATA D2"] += [data_d2[cpf_caso], data_d2[cpf_control]]
datas["DATA OBITO COVID"] += [data_obito[cpf_caso], data_obito[cpf_control]]
datas["DATA OBITO GERAL"] += [data_obito_geral[cpf_caso], data_obito_geral[cpf_control]]
#datas["DATA HOSPITALIZACAO"] += [data_hospitalizado[cpf_caso], data_hospitalizado[cpf_control]]
#datas["DATA TESTE"] += [data_teste[cpf_caso], data_teste[cpf_control]]
datas["TIPO"] += ["CASO", "CONTROLE"]
datas["PAR"] += [cpf_control, cpf_caso]
datas["PAREADO"] += [True, True]
print("Criando tabela de eventos ... Concluído")
print("Incluindo não pareados ...")
for j in tqdm(range(df_pop.shape[0])):
cpf = df_pop["CPF"].iat[j]
if matched[cpf]==False:
datas["CPF"] += [cpf]
datas["DATA D1"] += [data_d1[cpf]]
datas["DATA D2"] += [data_d2[cpf]]
datas["DATA OBITO COVID"] += [data_obito[cpf]]
datas["DATA OBITO GERAL"] += [data_obito_geral[cpf]]
#datas["DATA HOSPITALIZACAO"] += [data_hospitalizado[cpf]]
#datas["DATA TESTE"] += [data_teste[cpf]]
datas["TIPO"] += ["NAO PAREADO"]
datas["PAR"] += [np.nan]
datas["PAREADO"] += [False]
print("Incluindo não pareados ... Concluído.")
datas = pd.DataFrame(datas)
return datas
def get_events_per_pair(df_pop, pareados, col_names):
'''
    Build one row per matched pair holding the event dates of both the case and its control.
Args:
df_pop:
pareados:
matched:
col_names:
Return:
datas:
pandas.DataFrame.
'''
if col_names is None:
col_names = {
"D1": "data D1(VACINADOS)",
"D2": "data D2(VACINADOS)",
"OBITO COVID": "data_obito(OBITO COVID)",
"OBITO GERAL": "data falecimento(CARTORIOS)"
}
data_obito = defaultdict(lambda:np.nan)
data_obito_geral = defaultdict(lambda:np.nan)
data_d1 = defaultdict(lambda:np.nan)
data_d2 = defaultdict(lambda:np.nan)
for j in range(df_pop.shape[0]):
cpf = df_pop["cpf"].iat[j]
d1_dt = df_pop[col_names["D1"]].iat[j]
d2_dt = df_pop[col_names["D2"]].iat[j]
obito = df_pop[col_names["OBITO COVID"]].iat[j]
obito_geral = df_pop[col_names["OBITO GERAL"]].iat[j]
#teste = df_pop["DATA SOLICITACAO(TESTES)"].iat[j]
if not pd.isna(obito):
data_obito[cpf] = obito
elif not pd.isna(obito_geral):
data_obito_geral[cpf] = obito_geral
if not pd.isna(d1_dt):
data_d1[cpf] = d1_dt
if not pd.isna(d2_dt):
data_d2[cpf] = d2_dt
# -- create cols with dates --
datas = {
"CPF CASO": [], "DATA D1 CASO": [], "DATA D2 CASO": [],
"DATA OBITO COVID CASO": [], "DATA OBITO GERAL CASO": [],
"CPF CONTROLE": [], "DATA D1 CONTROLE": [], "DATA D2 CONTROLE": [],
"DATA OBITO COVID CONTROLE": [], "DATA OBITO GERAL CONTROLE": []
}
print("Criando tabela de eventos por par ...")
for j in tqdm(range(0, pareados.shape[0])):
cpf_caso = pareados["CPF CASO"].iat[j]
cpf_control = pareados["CPF CONTROLE"].iat[j]
# Fill new columns
datas["CPF CASO"] += [cpf_caso]
datas["CPF CONTROLE"] += [cpf_control]
datas["DATA D1 CASO"] += [data_d1[cpf_caso]]
datas["DATA D1 CONTROLE"] += [data_d1[cpf_control]]
datas["DATA D2 CASO"] += [data_d2[cpf_caso]]
datas["DATA D2 CONTROLE"] += [data_d2[cpf_control]]
datas["DATA OBITO COVID CASO"] += [data_obito[cpf_caso]]
datas["DATA OBITO COVID CONTROLE"] += [data_obito[cpf_control]]
datas["DATA OBITO GERAL CASO"] += [data_obito_geral[cpf_caso]]
datas["DATA OBITO GERAL CONTROLE"] += [data_obito_geral[cpf_control]]
print("Criando tabela de eventos por par ... Concluído")
datas = pd.DataFrame(datas)
return datas
def get_intervals_events(events_pair_df, final_cohort, which="D1"):
'''
Calculate the intervals between the start of the pair's cohort and all
possible events for the case and control.
For both case and control individuals, there 4 possible events:
- Death by Covid (Outcome)
- Death due to another cause (Censored)
- Control vaccination (Censored)
- End of the cohort (Censored)
The intervals are calculated for all events, and for the survival analysis
only the earliest event should be considered.
Args:
events_pair_df:
pandas.DataFrame.
Return:
data:
pandas.DataFrame
Return:
...
'''
# Column names translator.
colname = {
"CPF CASO": "CPF CASO", "D1 CASO": "DATA D1 CASO",
"D2 CASO": "DATA D2 CASO", "OBITO CASO": "DATA OBITO GERAL CASO",
"OBITO COVID CASO": "DATA OBITO COVID CASO",
"CPF CONTROLE": "CPF CONTROLE", "D1 CONTROLE": "DATA D1 CONTROLE",
"D2 CONTROLE": "DATA D2 CONTROLE", "OBITO CONTROLE": "DATA OBITO GERAL CONTROLE",
"OBITO COVID CONTROLE": "DATA OBITO COVID CONTROLE",
}
# Calculate intervals for case.
sbst1 = [colname["OBITO COVID CASO"], colname[f"{which} CASO"]]
sbst2 = [colname["OBITO CASO"], colname[f"{which} CASO"]]
events_pair_df[f"INTV OBITO COVID CASO({which})"] = events_pair_df[sbst1].apply(lambda x: calc_interval(x,sbst1), axis=1)
events_pair_df[f"INTV OBITO GERAL CASO({which})"] = events_pair_df[sbst2].apply(lambda x: calc_interval(x,sbst2), axis=1)
sbst_d1d2 = [colname[f"D2 CASO"], colname[f"D1 CASO"]]
if which=="D1":
events_pair_df[f"INTV D2-D1 CASO"] = events_pair_df[sbst_d1d2].apply(lambda x: calc_interval(x, sbst_d1d2), axis=1)
# Calculate intervals for control
sbst1 = [colname["OBITO COVID CONTROLE"], colname[f"{which} CASO"]]
sbst2 = [colname["OBITO CONTROLE"], colname[f"{which} CASO"]]
events_pair_df[f"INTV OBITO COVID CONTROLE({which})"] = events_pair_df[sbst1].apply(lambda x: calc_interval(x,sbst1), axis=1)
events_pair_df[f"INTV OBITO GERAL CONTROLE({which})"] = events_pair_df[sbst2].apply(lambda x: calc_interval(x,sbst2), axis=1)
sbst_d1d2 = [colname[f"D2 CONTROLE"], colname[f"D1 CONTROLE"]]
if which=="D1":
events_pair_df[f"INTV D2-D1 CONTROLE"] = events_pair_df[sbst_d1d2].apply(lambda x: calc_interval(x, sbst_d1d2), axis=1)
# Interval in common for both individuals
sbst_d1 = [colname["D1 CONTROLE"], colname[f"{which} CASO"]]
events_pair_df[f"INTV D1 CASO CONTROLE({which})"] = events_pair_df[sbst_d1].apply(lambda x: calc_interval(x,sbst_d1), axis=1)
events_pair_df[f"INTV FIM COORTE({which})"] = events_pair_df[colname[f"{which} CASO"]].apply(lambda x: (final_cohort-x.date()).days if not pd.isna(x) else np.nan)
return events_pair_df
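# Hedged usage sketch (illustrative only): the frame returned by get_events_per_pair above is
# the expected input; the new "INTV ..." columns hold day counts from the case's D1 (or D2).
def _example_intervals(events_pair_df):
    final_cohort = dt.date(2021, 8, 31)
    with_intervals = get_intervals_events(events_pair_df, final_cohort, which="D1")
    return with_intervals[["INTV OBITO COVID CASO(D1)", "INTV FIM COORTE(D1)"]]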
def get_intervals(events_pair_df, final_cohort=dt.date(2021, 8, 31)):
'''
    For each matched pair, compute the follow-up time from the case's D1 to the earliest
    event and flag whether the Covid-death outcome occurred.
Args:
events_df:
pandas.DataFrame.
Return:
data:
pandas.DataFrame.
'''
colname = {
"CPF CASO": "CPF CASO",
"D1 CASO": "DATA D1 CASO",
"D2 CASO": "DATA D2 CASO",
"OBITO CASO": "DATA OBITO GERAL CASO",
"OBITO COVID CASO": "DATA OBITO COVID CASO",
"CPF CONTROLE": "CPF CONTROLE",
"D1 CONTROLE": "DATA D1 CONTROLE",
"D2 CONTROLE": "DATA D2 CONTROLE",
"OBITO CONTROLE": "DATA OBITO GERAL CONTROLE",
"OBITO COVID CONTROLE": "DATA OBITO COVID CONTROLE",
}
data = {
"CPF": [], "DATA D1": [], "DATA D2": [], "DATA OBITO COVID": [],
"DATA OBITO GERAL": [], "TIPO": [], "PAR": [], "PAREADO": [],
"OBITO COVID DURACAO": [], "COM DESFECHO - OBITO COVID": []
}
# --> Go through each pair
for j in tqdm(range(events_pair_df.shape[0])):
cpf_caso = events_pair_df[colname["CPF CASO"]].iat[j]
cpf_controle = events_pair_df[colname["CPF CONTROLE"]].iat[j]
d2_caso = events_pair_df[colname["D2 CASO"]].iat[j]
d2_controle = events_pair_df[colname["D2 CONTROLE"]].iat[j]
init = events_pair_df[colname["D1 CASO"]].iat[j].date()
events_caso = {
"OBITO CASO": events_pair_df[colname["OBITO CASO"]].iat[j],
"OBITO COVID CASO": events_pair_df[colname["OBITO COVID CASO"]].iat[j],
"COORTE FINAL": final_cohort
}
events_controle = {
"D1 CONTROLE": events_pair_df[colname["D1 CONTROLE"]].iat[j],
"OBITO CONTROLE": events_pair_df[colname["OBITO CONTROLE"]].iat[j],
"OBITO COVID CONTROLE": events_pair_df[colname["OBITO COVID CONTROLE"]].iat[j],
"COORTE FINAL": final_cohort
}
# Convert date strings to date formats.
for key in events_caso.keys():
if not pd.isna(events_caso[key]) and type(events_caso[key])!=dt.date:
events_caso[key] = events_caso[key].date()
for key in events_controle.keys():
if not pd.isna(events_controle[key]) and type(events_controle[key])!=dt.date:
events_controle[key] = events_controle[key].date()
# Determine final day of each person of the pair.
# --> For case:
timeline_namecaso = ["D1 CONTROLE", "OBITO COVID CASO", "OBITO CASO", "COORTE FINAL"]
timeline_caso = [events_controle["D1 CONTROLE"], events_caso["OBITO COVID CASO"],
events_caso["OBITO CASO"], events_caso["COORTE FINAL"]]
# replace NaN for any date later than "COORTE FINAL"
timeline_caso = [x if not pd.isna(x) else dt.date(2050, 1, 1) for x in timeline_caso ]
sorted_tp_caso = sorted(zip(timeline_caso, timeline_namecaso))
final_namecaso = sorted_tp_caso[0][1]
final_caso = sorted_tp_caso[0][0]
interval_caso = (final_caso-init).days
#print(sorted_tp_caso, interval_caso, final_namecaso, final_caso)
if final_namecaso!="OBITO COVID CASO":
type_caso = False
else:
type_caso = True
# --> For control:
timeline_namecontrole = ["D1 CONTROLE", "OBITO COVID CONTROLE", "OBITO CONTROLE", "COORTE FINAL"]
timeline_controle = [events_controle["D1 CONTROLE"], events_controle["OBITO COVID CONTROLE"],
events_controle["OBITO CONTROLE"], events_controle["COORTE FINAL"]]
        timeline_controle = [x if not pd.isna(x) else dt.date(2050, 1, 1) for x in timeline_controle ]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 17 11:56:35 2019
@author: hcamphausen
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv('train.csv', index_col=0, parse_dates=True)
plt.figure(figsize=(8,4))
sns.boxplot(x='season', y='count', data=df)
plt.figure(figsize=(16,8))
sns.regplot(x='windspeed', y='count', data=df)
plt.figure(figsize=(30,8))
sns.boxplot(x='humidity', y='count', data=df)
plt.figure(figsize=(16,8))
sns.regplot(x='humidity', y='count', data=df)
# FEATURE ENGINEERING
def feature_engineering(df):
# drop columns that test data does not have
    if 'casual' in df.columns and 'registered' in df.columns:
df.drop(['casual', 'registered'], axis=1, inplace=True)
else:
pass
# one-hot encoding season
one_hot_encoded = pd.get_dummies(df['season'])
df = pd.concat((df, one_hot_encoded), axis=1)
df.rename(columns={1: "spring", 2: "summer", 3: "fall", 4: "winter"}, inplace=True)
df.drop(['season'], axis = 1, inplace=True)
    # weather codes - 1: clear, 2: few clouds, 3: partly cloudy, 4: cloudy
one_hot_encoded_2 = pd.get_dummies(df['weather'])
df = pd.concat((df, one_hot_encoded_2), axis=1)
df.rename(columns={1:"clear",2:"few_clouds",3:"partly_cloudy",4:"cloudy"}, inplace=True)
df.drop(['cloudy'], axis=1, inplace=True)
df.drop(['weather'], axis=1, inplace=True)
# log count - remember to exponent count for test predictions
df['count_log'] = np.log1p(df['count'])
df.drop(['count'], axis=1, inplace=True)
# add hour column
df['hour'] = df.index.hour
#df['year'] = df.index.year
#df['month'] = df.index.month
#df['day'] = df.index.day
df['dayofweek'] = df.index.dayofweek
# one hot encoding hour and dayof week
one_hot_encoded_day_of_week = pd.get_dummies(df['dayofweek'])
df = pd.concat((df, one_hot_encoded_day_of_week), axis=1)
df.rename(columns={0:"Monday",1:"Tuesday",2:"Wednesday",3:"Thursday",4:"Friday",5:"Saturday",6:"Sunday"}, inplace=True)
df.drop(['dayofweek'], axis=1, inplace=True)
one_hot_encoded_hour = pd.get_dummies(df['hour'])
df = pd.concat((df, one_hot_encoded_hour), axis=1)
df.drop(['hour'], axis=1, inplace=True)
# drop temperatures
df.drop(['temp','atemp'], axis=1, inplace=True)
# drop holiday as super small dataset for 1
df.drop(['holiday'], axis = 1, inplace=True)
#scaling data
#scaler = MinMaxScaler()
#df[['humidity', 'windspeed']] = scaler.fit_transform(df[['humidity', 'windspeed']])
#drop windspeed as weird measurings
df.drop(['windspeed'], axis=1, inplace=True)
#drop humidity as weird measurings
df.drop(['humidity'], axis=1, inplace=True)
return df
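# Hedged usage sketch (illustrative only): the raw frame is assumed to have the Kaggle-style
# columns referenced above and a datetime index; note that 'count' becomes 'count_log'
# (log1p), so predictions must be mapped back with np.expm1 as done further below.
def _example_feature_engineering(raw_df):
    engineered = feature_engineering(raw_df.copy())
    return engineered.columns.tolist()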
df_train = feature_engineering(df)
df_train.head()
plt.figure(figsize=(8,4))
sns.heatmap(df_train.corr(), cmap='Oranges', annot=True)
corr = df_train[df_train.columns[1:]].corr()['count_log'][:]
df_corr = pd.DataFrame(data=corr)
df_corr.plot.bar()
# SPLITTING TRAIN AND TEST DATA SET
df_train.columns
# shuffle : df_train = df_train.sample(len(df_train))
X = df_train[['workingday', 'spring', 'summer', 'fall',
'winter', 'clear', 'few_clouds', 'partly_cloudy',
'Monday', 'Tuesday', 'Wednesday',
'Thursday', 'Friday', 'Saturday', 'Sunday',
0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15,
16, 17, 18, 19,
20, 21, 22, 23
]]
y = df_train['count_log']
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.5)
pipeline = make_pipeline(
MinMaxScaler(), # transform
Ridge(alpha=0.0) # predict
)
# Hyperparameter Optimization
g = GridSearchCV(pipeline, cv=5, param_grid={
'ridge__alpha': [0.0, 0.1, 0.01, 0.001]
})
#fitting model
g.fit(Xtrain,ytrain)
# train vs. test scores
train_score = g.score(Xtrain, ytrain)
print("This is my training score: " + str(train_score))
test_score = g.score(Xtest, ytest)
print("This is my testing score: " + str(test_score))
y_pred = g.predict(Xtrain)
mse_train = mean_squared_error(ytrain, y_pred)
print("This is my train MSE: " + str(mse_train))
y_predtest = g.predict(Xtest)
mse_test = mean_squared_error(ytest, y_predtest)
print("This is my test MSE: " + str(mse_test))
y2 = np.expm1(ytrain)
y2pred = np.expm1(y_pred)
mae_train = mean_absolute_error(y2, y2pred)
print("This is my train MAE: " + str(mae_train))
y3 = np.expm1(ytest)
y3pred = np.expm1(y_predtest)
mae_test = mean_absolute_error(y3, y3pred)
print("This is my test MAE: " + str(mae_test))
#######################
#CHECKING ASSUMPTIONS
sns.jointplot(ytrain,y_pred, kind="reg")
# Autocorrelation - Durbin-Watson statistic (aim at 2)
from statsmodels.stats.stattools import durbin_watson
print(durbin_watson(ytrain-y_pred, axis=0))
# Sum of residuals = 0?
residuals = ytrain - y_pred
residuals = np.array(residuals)
sum_res = residuals.mean().round(5)
print("The sum of my residuals is :" + str(sum_res))
# Normal distribution of residuals?
plt.hist(residuals, bins=20)
# Change in variance - homoscedasticity / heteroscedasticity
import statsmodels.api as sm
pl = sm.qqplot(residuals, line='r')
# Are features linearly independent?
from statsmodels.stats.outliers_influence import variance_inflation_factor as VIF
vifs = [VIF(df_train.values, i) for i, colname in enumerate(df_train)]
s = pd.Series(vifs,index=df_train.columns)
s.plot.bar()
##########################
# Kaggle test set
kaggle_test = pd.read_csv('test.csv', parse_dates=True,index_col=0)
def feature_engineering_test(df):
# drop columns that test data does not have
    if 'casual' in df.columns and 'registered' in df.columns:
df.drop(['casual', 'registered'], axis=1, inplace=True)
else:
pass
# one-hot encoding season
one_hot_encoded = pd.get_dummies(df['season'])
df = pd.concat((df, one_hot_encoded), axis=1)
df.rename(columns={1: "spring", 2: "summer", 3: "fall", 4: "winter"}, inplace=True)
df.drop(['season'], axis = 1, inplace=True)
    # weather codes - 1: clear, 2: few clouds, 3: partly cloudy, 4: cloudy
one_hot_encoded_2 = pd.get_dummies(df['weather'])
df = pd.concat((df, one_hot_encoded_2), axis=1)
df.rename(columns={1:"clear",2:"few_clouds",3:"partly_cloudy",4:"cloudy"}, inplace=True)
df.drop(['cloudy'], axis=1, inplace=True)
df.drop(['weather'], axis=1, inplace=True)
# add hour column
df['hour'] = df.index.hour
df['dayofweek'] = df.index.dayofweek
# one hot encoding hour and dayof week
one_hot_encoded_day_of_week = pd.get_dummies(df['dayofweek'])
df = pd.concat((df, one_hot_encoded_day_of_week), axis=1)
df.rename(columns={0:"Monday",1:"Tuesday",2:"Wednesday",3:"Thursday",4:"Friday",5:"Saturday",6:"Sunday"}, inplace=True)
df.drop(['dayofweek'], axis=1, inplace=True)
one_hot_encoded_hour = pd.get_dummies(df['hour'])
    df = pd.concat((df, one_hot_encoded_hour), axis=1)
from statsmodels.compat import lrange
from statsmodels.iolib import SimpleTable
from .DemeanDataframe import demean_dataframe,demeanonex
from .FormTransfer import form_transfer
from .OLSFixed import OLSFixed
from .RobustErr import robust_err
from .CalDf import cal_df
from .CalFullModel import cal_fullmodel
from .Forg import forg
from .WaldTest import waldtest
import statsmodels.api as sm
from scipy.stats import t
from scipy.stats import f
import time
import numpy as np
import pandas as pd
from multiprocessing import Pool, Manager
from .ClusterErr import clustered_error, is_nested, min_clust
def ols_high_d_category_new(data_df, consist_input=None, out_input=None, category_input=None, cluster_input=[],
endog_x_input=[], iv_col_input=[],formula=None, robust=False, c_method='cgm', psdef=True,
epsilon=1e-8, max_iter=1e6,
process=5):
"""
:param endog_x_input: List of endogenous variables
:param iv_col_input: List of instrument variables
:param data_df: Dataframe of relevant data
:param consist_input: List of continuous variables
:param out_input: List of dependent variables(so far, only support one dependent variable)
:param category_input: List of category variables(fixed effects)
:param cluster_input: List of cluster variables
:param formula: a string like 'y~x+x2|id+firm|id',dependent_variable~continuous_variable|fixed_effect|clusters
:param robust: bool value of whether to get a robust variance
:param c_method: method used to calculate multi-way clusters variance. Possible choices are:
- 'cgm'
- 'cgm2'
:param psdef:if True, replace negative eigenvalue of variance matrix with 0 (only in multi-way clusters variance)
:param epsilon: tolerance of the demean process
:param max_iter: max iteration of the demean process
:param process: number of process in multiprocessing(only in multi-way clusters variance calculating)
:return:params,df,bse,tvalues,pvalues,rsquared,rsquared_adj,fvalue,f_pvalue,variance_matrix,fittedvalues,resid,summary
"""
if (consist_input is None) & (formula is None):
raise NameError('You have to input list of variables name or formula')
elif consist_input is None:
out_col, consist_col, category_col, cluster_col, endog_x, iv_col = form_transfer(formula)
print('dependent variable(s):', out_col)
print('continuous variables:', consist_col)
print('category variables(fixed effects):', category_col)
print('cluster variables:', cluster_col)
if endog_x:
print('endogenous variables:', endog_x)
print('instruments:', iv_col)
else:
out_col, consist_col, category_col, cluster_col, endog_x, iv_col = out_input, consist_input, category_input, \
cluster_input, endog_x_input, iv_col_input
print('dependent variable(s):', out_col)
print('continuous variables:', consist_col)
print('category variables(fixed effects):', category_col)
print('cluster variables:', cluster_col)
if endog_x:
print('endogenous variables:', endog_x)
print('instruments:', iv_col)
    if category_col == [] or category_col[0] == '0':
demeaned_df = data_df.copy()
# const_consist = sm.add_constant(demeaned_df[consist_col])
# # print(consist_col)
# consist_col = ['const'] + consist_col
# demeaned_df['const'] = const_consist['const']
# print('Since the model does not have fixed effect, add an intercept.')
rank = 0
else:
consist_var = []
for i in consist_col:
consist_var.append(i)
for i in endog_x:
consist_var.append(i)
for i in iv_col:
consist_var.append(i)
consist_var.append(out_col[0])
start = time.process_time()
demeaned_df = data_df.copy()
pool = Pool(processes=len(consist_var))
job = []
m = Manager()
return_dict = m.dict()
for consist in consist_var:
print(consist)
a = pool.apply_async(demeanonex, args=(data_df, consist, category_col, return_dict, epsilon, max_iter))
job.append(a)
pool.close()
pool.join()
return_dict = dict(return_dict)
        demeaned_df[consist_var] = pd.DataFrame.from_dict(return_dict)
import unittest
import pandas as pd
from code.feature_extraction.list_counter import PhotosNum, URLsNum, HashtagNum, MentionNum, TokenNum
from code.util import COLUMN_PHOTOS, COLUMN_URLS, COLUMN_HASHTAGS, COLUMN_MENTIONS
class PhotosNumTest(unittest.TestCase):
def setUp(self) -> None:
self.INPUT_COLUMN = COLUMN_PHOTOS
self.extractor = PhotosNum(self.INPUT_COLUMN)
def test_photos_num(self):
input_data = '''['www.hashtag.de/234234.jpg', 'www.yolo.us/g5h23g45f.png', 'www.data.it/246gkjnbvh2.jpg']'''
        input_df = pd.DataFrame([COLUMN_PHOTOS])
""" Feature generation based on deep learning for images
wrapped into d3m format
TODO: update primitive info
"""
import importlib
import logging
import numpy as np
import pandas as pd
import os
import shutil
import sys
import typing
from scipy.misc import imresize
import d3m.metadata.base as mbase
from d3m.primitive_interfaces.featurization import FeaturizationTransformerPrimitiveBase
from d3m.primitive_interfaces.base import CallResult
from d3m.metadata import hyperparams
from d3m import container
from . import config
logger = logging.getLogger(__name__)
# Input image tensor has 4 dimensions: (num_images, 224, 224, 3)
Inputs = container.List
# Output feature has 2 dimensions: (num_images, layer_size[layer_index])
Outputs = container.DataFrame # extracted features
class ResNet50Hyperparams(hyperparams.Hyperparams):
layer_index = hyperparams.UniformInt(
lower=0,
upper=11,
default=0,
description="Specify the layer of the neural network to use for features. Lower numbered layers correspond to higher-level abstract features. The number of features by layer index are [2048, 100352, 25088, 25088, 100352, 25088, 25088, 100352, 25088, 25088, 200704].",
semantic_types=["http://schema.org/Integer", "https://metadata.datadrivendiscovery.org/types/TuningParameter"]
)
# corresponding layer_size = [2048, 100352, 25088, 25088, 100352, 25088, 25088, 100352, 25088, 25088, 200704]
generate_metadata = hyperparams.UniformBool(
default=False,
description="A control parameter to set whether to generate metada after the feature extraction. It will be very slow if the columns length is very large. For the default condition, it will turn off to accelerate the program running.",
semantic_types=["http://schema.org/Boolean", "https://metadata.datadrivendiscovery.org/types/ControlParameter"]
)
class Vgg16Hyperparams(hyperparams.Hyperparams):
layer_index = hyperparams.UniformInt(
lower=0,
upper=4,
default=0,
description="Specify the layer of the neural network to use for features. Lower numbered layers correspond to higher-level abstract features. The number of features by layer index are [25088, 100352, 200704, 401408]",
semantic_types=["http://schema.org/Integer", "https://metadata.datadrivendiscovery.org/types/TuningParameter"]
)
generate_metadata = hyperparams.UniformBool(
default=False,
description="A control parameter to set whether to generate metada after the feature extraction. It will be very slow if the columns length is very large. For the default condition, it will turn off to accelerate the program running.",
semantic_types=["http://schema.org/Boolean", "https://metadata.datadrivendiscovery.org/types/ControlParameter"]
)
# corresponding layer_size = [25088, 100352, 200704, 401408]
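# Hedged usage note (illustrative only; the exact d3m hyperparams API calls are assumptions):
# one would typically start from the defaults and override layer_index, e.g.
#   hp = ResNet50Hyperparams(ResNet50Hyperparams.defaults(), layer_index=0)
# layer_index=0 selects the 2048-dim pooled ResNet50 features; larger indices expose
# earlier, higher-dimensional activations (see the descriptions above).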
class KerasPrimitive:
_weight_files = []
def __init__(self):
self._initialized = False
def _lazy_init(self):
if self._initialized:
return
# Lazy import modules as not to slow down d3m.index
global keras_models, keras_backend, tf
keras_models = importlib.import_module('keras.models')
keras_backend = importlib.import_module('keras.backend')
tf = importlib.import_module('tensorflow')
self._initialized = True
@staticmethod
def _get_keras_data_dir(cache_subdir='models'):
"""
Return Keras cache directory. See keras/utils/data_utils.py:get_file()
"""
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
return datadir
@staticmethod
def _get_weight_installation(weight_files: typing.List['WeightFile']):
"""
Return D3M file installation entries
"""
return [
{'type': 'FILE',
'key': weight_file.name,
'file_uri': weight_file.uri,
'file_digest': weight_file.digest}
for weight_file in weight_files]
def _setup_weight_files(self):
"""
Copy weight files from volume to Keras cache directory
"""
for file_info in self._weight_files:
if file_info.name in self.volumes:
dest = os.path.join(file_info.data_dir, file_info.name)
if not os.path.exists(dest):
shutil.copy2(self.volumes[file_info.name], dest)
else:
logger.warning('Keras weight file not in volume: {}'.format(file_info.name))
class WeightFile(typing.NamedTuple):
name: str
uri: str
digest: str
data_dir: str = KerasPrimitive._get_keras_data_dir()
class ResNet50ImageFeature(FeaturizationTransformerPrimitiveBase[Inputs, Outputs, ResNet50Hyperparams], KerasPrimitive):
"""
Image Feature Generation using pretrained deep neural network RestNet50.
Parameters
----------
_layer_index : int, default: 0, domain: range(11)
Layer of the network to use to generate features. Smaller
indices are closer to the output layers of the network.
_resize_data : Boolean, default: True, domain: {True, False}
If True resize images to 224 by 224.
"""
# Resnet50 weight files info is from here:
# https://github.com/keras-team/keras-applications/blob/master/keras_applications/resnet50.py
_weight_files = [
WeightFile('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels.h5'),
'bdc6c9f787f9f51dffd50d895f86e469cc0eb8ba95fd61f0801b1a264acb4819'),
# WeightFile('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
# ('https://github.com/fchollet/deep-learning-models/'
# 'releases/download/v0.2/'
# 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'),
# 'a268eb855778b3df3c7506639542a6af')
]
__author__ = 'USC ISI'
metadata = hyperparams.base.PrimitiveMetadata({
'id': 'dsbox-featurizer-image-resnet50',
'version': config.VERSION,
'name': "DSBox Image Featurizer RestNet50",
'description': 'Generate image features using RestNet50',
'python_path': 'd3m.primitives.feature_extraction.ResNet50ImageFeature.DSBOX',
'primitive_family': "FEATURE_EXTRACTION",
'algorithm_types': ["FEEDFORWARD_NEURAL_NETWORK"],
'keywords': ['image', 'featurization', 'resnet50'],
'source': {
'name': config.D3M_PERFORMER_TEAM,
"contact": config.D3M_CONTACT,
'uris': [config.REPOSITORY]
},
# The same path the primitive is registered with entry points in setup.py.
'installation': [config.INSTALLATION] + KerasPrimitive._get_weight_installation(_weight_files),
# Choose these from a controlled vocabulary in the schema. If anything is missing which would
# best describe the primitive, make a merge request.
# A metafeature about preconditions required for this primitive to operate well.
'precondition': [],
'hyperparms_to_tune': []
})
def __init__(self, *, hyperparams: ResNet50Hyperparams, volumes: typing.Union[typing.Dict[str, str], None]=None) -> None:
super().__init__(hyperparams=hyperparams, volumes=volumes)
KerasPrimitive.__init__(self)
self.hyperparams = hyperparams
# All other attributes must be private with leading underscore
self._has_finished = False
self._iterations_done = False
# ============TODO: these three could be hyperparams=========
self._layer_index = hyperparams['layer_index']
self._preprocess_data = True
self._resize_data = True
self._RESNET50_MODEL = None
# ===========================================================
def _lazy_init(self):
if self._initialized:
return
KerasPrimitive._lazy_init(self)
# Lazy import modules as not to slow down d3m.index
global resnet50
resnet50 = importlib.import_module('keras.applications.resnet50')
self._setup_weight_files()
keras_backend.clear_session()
if self._RESNET50_MODEL is None:
original = sys.stdout
sys.stdout = sys.stderr
self._RESNET50_MODEL = resnet50.ResNet50(weights='imagenet')
sys.stdout = original
self._layer_numbers = [-2, -4, -8, -11, -14, -18, -21, -24, -30, -33, -36]
if self._layer_index < 0:
self._layer_index = 0
elif self._layer_index > len(self._layer_numbers):
            self._layer_index = len(self._layer_numbers)-1
self._layer_number = self._layer_numbers[self._layer_index]
self._org_model = self._RESNET50_MODEL
self._model = keras_models.Model(self._org_model.input,
self._org_model.layers[self._layer_number].output)
self._graph = tf.get_default_graph()
self._annotation = None
def _preprocess(self, image_tensor):
"""Preprocess image data by modifying it directly"""
resnet50.preprocess_input(image_tensor)
def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:
"""Apply neural network-based feature extraction to image_tensor"""
self._lazy_init()
image_tensor = inputs[1]
image_d3mIndex = inputs[0]
# preprocess() modifies the data. For now just copy the data.
if not len(image_tensor.shape) == 4:
            raise ValueError('Expect shape to have 4 dimensions')
resized = False
if self._resize_data:
            if not (image_tensor.shape[1] == 224 and image_tensor.shape[2] == 224):
resized = True
y = np.empty((image_tensor.shape[0], 224, 224, 3))
for index in range(image_tensor.shape[0]):
y[index] = imresize(image_tensor[index], (224, 224))
image_tensor = y
# preprocess() modifies the data. For now just copy the data.
if self._preprocess_data:
if resized:
# Okay to modify image_tensor, since its not input
data = image_tensor
else:
data = image_tensor.copy()
self._preprocess(data)
else:
data = image_tensor
# BUG fix: add global variable to fix ta3 system if calling multiple times of this primitive
with self._graph.as_default():
output_ndarray = self._model.predict(data)
output_ndarray = output_ndarray.reshape(output_ndarray.shape[0], -1).astype('float64') # change to astype float64 to resolve pca output
output_dataFrame = container.DataFrame(output_ndarray)
# update the original index to be d3mIndex
        output_dataFrame = container.DataFrame(pd.concat([pd.DataFrame(image_d3mIndex, columns=['d3mIndex'])
import pandas as pd
import jsonlines
import src.utils.io as io
from itertools import chain
from src.interface.corpus import Corpus
import logging as log
import numpy
class InputOutputHandler:
"""Interface between the provided training data and other modules.
When initialized author information for each doc are fetched from the database via the
provided Corpus object."""
def __init__(self,
corpus,
fsequence = "./training/training-sequence.tsv",
fquery = "./training/fair-TREC-training-sample.json",
fgroup = "./training/fair-TREC-sample-author-groups.csv"):
self.corpus = corpus
queries = io.read_jsonlines(fquery, handler = self.__unnest_query)
queries = list(chain.from_iterable(queries))
        self.seq = pd.read_csv(fsequence, names=["sid", "q_num", "qid"], sep='\.|,', engine='python')
import logging
from concurrent.futures.process import ProcessPoolExecutor
import numpy as np
import pandas as pd
from sklearn.linear_model import RANSACRegressor
from msi_recal.join_by_mz import join_by_mz
from msi_recal.math import peak_width, ppm_to_sigma_1
from msi_recal.mean_spectrum import representative_spectrum, hybrid_mean_spectrum
from msi_recal.params import RecalParams
from msi_recal.passes.transform import Transform
logger = logging.getLogger(__name__)
class AlignRansac(Transform):
CACHE_FIELDS = [
'min_mz',
'max_mz',
'coef_',
'lo_warp_',
'hi_warp_',
'target_spectrum',
]
def __init__(self, params: RecalParams, ppm='20'):
self.align_sigma_1 = ppm_to_sigma_1(float(ppm), params.analyzer, params.base_mz)
self.jitter_sigma_1 = params.jitter_sigma_1
self.analyzer = params.analyzer
self.min_mz = None
self.max_mz = None
self.coef_ = {}
self.lo_warp_ = {}
self.hi_warp_ = {}
self.target_spectrum = None
def fit(self, X):
missing_cols = {'mz', 'ints', 'mz'}.difference(X.columns)
assert not missing_cols, f'X is missing columns: {", ".join(missing_cols)}'
mean_spectrum = hybrid_mean_spectrum(X, self.analyzer, self.align_sigma_1)
spectrum = representative_spectrum(
X,
mean_spectrum,
self.analyzer,
self.align_sigma_1,
denoise=True,
)
self.target_spectrum = spectrum[['mz', 'ints']].sort_values('mz')
logger.info(f'Alignment spectrum has {len(spectrum)} peaks')
self.min_mz = np.round(X.mz.min(), -1)
self.max_mz = np.round(X.mz.max(), -1)
return self
def _align_ransac_inner(self, sp, mzs, ints):
hits = join_by_mz(
self.target_spectrum,
'mz',
            pd.DataFrame({'sample_mz': mzs, 'sample_ints': ints})
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
Series,
date_range,
)
import pandas._testing as tm
class TestDataFrameInterpolate:
def test_interpolate_inplace(self, frame_or_series, using_array_manager, request):
# GH#44749
if using_array_manager and frame_or_series is DataFrame:
mark = pytest.mark.xfail(reason=".values-based in-place check is invalid")
request.node.add_marker(mark)
obj = frame_or_series([1, np.nan, 2])
orig = obj.values
obj.interpolate(inplace=True)
expected = frame_or_series([1, 1.5, 2])
tm.assert_equal(obj, expected)
# check we operated *actually* inplace
assert np.shares_memory(orig, obj.values)
assert orig.squeeze()[1] == 1.5
def test_interp_basic(self):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
expected = DataFrame(
{
"A": [1.0, 2.0, 3.0, 4.0],
"B": [1.0, 4.0, 9.0, 9.0],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
result = df.interpolate()
tm.assert_frame_equal(result, expected)
result = df.set_index("C").interpolate()
expected = df.set_index("C")
expected.loc[3, "A"] = 3
expected.loc[5, "B"] = 9
tm.assert_frame_equal(result, expected)
def test_interp_empty(self):
# https://github.com/pandas-dev/pandas/issues/35598
df = DataFrame()
result = df.interpolate()
assert result is not df
expected = df
tm.assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame(
{
"A": [1, 2, np.nan, 4],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
msg = (
r"method must be one of \['linear', 'time', 'index', 'values', "
r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', "
r"'barycentric', 'krogh', 'spline', 'polynomial', "
r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', "
r"'cubicspline'\]. Got 'not_a_method' instead."
)
with pytest.raises(ValueError, match=msg):
df.interpolate(method="not_a_method")
def test_interp_combo(self):
df = DataFrame(
{
"A": [1.0, 2.0, np.nan, 4.0],
"B": [1, 4, 9, np.nan],
"C": [1, 2, 3, 5],
"D": list("abcd"),
}
)
result = df["A"].interpolate()
expected = Series([1.0, 2.0, 3.0, 4.0], name="A")
tm.assert_series_equal(result, expected)
result = df["A"].interpolate(downcast="infer")
expected = Series([1, 2, 3, 4], name="A")
tm.assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]})
df = df.set_index("A")
msg = (
"Interpolation with NaNs in the index has not been implemented. "
"Try filling those NaNs before interpolating."
)
with pytest.raises(NotImplementedError, match=msg):
df.interpolate(method="values")
@td.skip_if_no_scipy
def test_interp_various(self):
df = DataFrame(
{"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]}
)
df = df.set_index("C")
expected = df.copy()
result = df.interpolate(method="polynomial", order=1)
expected.loc[3, "A"] = 2.66666667
expected.loc[13, "A"] = 5.76923076
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="cubic")
# GH #15662.
expected.loc[3, "A"] = 2.81547781
expected.loc[13, "A"] = 5.52964175
tm.assert_frame_equal(result, expected)
result = df.interpolate(method="nearest")
expected.loc[3, "A"] = 2
expected.loc[13, "A"] = 5
        tm.assert_frame_equal(result, expected, check_dtype=False)
'''
Description: A GAN that generates data in the 1010 pattern
Author: xuzf
Date: 2021-05-09 14:22:22
FilePath: /Algorithm-Toy/GAN/1-1010-GAN.py
'''
import random
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def generate_real():
'''
    @description: Generate a "real" training sample in the 1010 pattern (with random noise)
@param {none}
@return {tensor}
'''
real_data = torch.FloatTensor([
random.uniform(0.8, 1.0),
random.uniform(0.0, 0.2),
random.uniform(0.8, 1.0),
random.uniform(0.0, 0.2),
])
return real_data
def generate_random(size):
'''
@description: 生成训练负样本
@param {size: 生成样本的维度}
@return {tensor}
'''
random_data = torch.rand(size)
return random_data
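# Hedged usage sketch (illustrative only): a "real" sample should look roughly like
# [~1, ~0, ~1, ~0], while generate_random(4) is uniform noise over [0, 1).
def _example_gan_samples():
    real = generate_real()      # e.g. tensor([0.93, 0.08, 0.87, 0.15])
    fake = generate_random(4)   # e.g. tensor([0.42, 0.77, 0.11, 0.63])
    return real, fake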
# Discriminator
class Discriminator(nn.Module):
def __init__(self):
        # Call the parent class constructor to initialise the base Module
        super().__init__()
        # Define the network
        self.model = nn.Sequential(nn.Linear(4, 3), nn.Sigmoid(),
                                   nn.Linear(3, 1), nn.Sigmoid())
        # Create the loss function
        self.loss_function = nn.MSELoss()
        # Create the optimiser (stochastic gradient descent)
        self.optimiser = torch.optim.SGD(self.parameters(), lr=0.01)
        # Monitor training progress
        self.counter = 0
self.process = []
def forward(self, inputs):
return self.model(inputs)
def train(self, inputs, targets):
        # Compute the network output
        outputs = self.forward(inputs)
        loss = self.loss_function(outputs, targets)
        # Backpropagation
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()
        # Monitor the training process
self.counter += 1
if self.counter % 10 == 0:
self.process.append(loss.item())
if self.counter % 10000 == 0:
print('counter = {}'.format(self.counter))
def plot_process(self):
df = pd.DataFrame(self.process, columns=['loss'])
df.plot(ylim=(0, 1.0),
figsize=(16, 8),
alpha=0.1,
marker='.',
grid=True)
# Generator
class Generator(nn.Module):
def __init__(self):
super().__init__()
        # Define the network
        self.model = nn.Sequential(nn.Linear(1, 3), nn.Sigmoid(),
                                   nn.Linear(3, 4), nn.Sigmoid())
        # Create the optimiser (stochastic gradient descent)
        self.optimiser = torch.optim.SGD(self.parameters(), lr=0.01)
        # Monitor training progress
self.counter = 0
self.process = []
def forward(self, inputs):
return self.model(inputs)
def train(self, discriminator, inputs, targets):
        # Generator output
        gen_data = self.forward(inputs)
        # Discriminator's prediction on the generated data
        pred_data = discriminator(gen_data)
        # Compute the loss
        self.optimiser.zero_grad()
        loss = discriminator.loss_function(pred_data, targets)
        # Monitor the training process
        self.counter += 1
        if self.counter % 10 == 0:
            self.process.append(loss.item())
        # Backpropagate the error gradient starting from the discriminator
        loss.backward()
        # Update only the generator's parameters with its own optimiser
self.optimiser.step()
def plot_process(self):
        df = pd.DataFrame(self.process, columns=['loss'])
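# Hedged training-loop sketch (illustrative only): alternate discriminator updates on real
# and generated data with a generator update that targets the "real" label. The step count
# and the constant 0.5 seed input are assumptions, not taken from the original script.
def _example_gan_training(steps=10000):
    D, G = Discriminator(), Generator()
    for _ in range(steps):
        D.train(generate_real(), torch.FloatTensor([1.0]))                               # real -> 1
        D.train(G.forward(torch.FloatTensor([0.5])).detach(), torch.FloatTensor([0.0]))  # fake -> 0
        G.train(D, torch.FloatTensor([0.5]), torch.FloatTensor([1.0]))                   # train G to fool D
    return D, G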
import numpy as np
import pandas as pd
import pandas.testing as tm
import ibis
from ibis.expr import datatypes as dt
from ibis.expr import schema as sch
def test_infer_basic_types():
df = pd.DataFrame(
{
'bigint_col': np.array(
[0, 10, 20, 30, 40, 50, 60, 70, 80, 90], dtype='i8'
),
'bool_col': np.array(
[
True,
False,
True,
False,
True,
None,
True,
False,
True,
False,
],
dtype=np.bool_,
),
'bool_obj_col': np.array(
[
True,
False,
np.nan,
False,
True,
np.nan,
True,
np.nan,
True,
False,
],
dtype=np.object_,
),
'date_string_col': [
'11/01/10',
None,
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
'11/01/10',
],
'double_col': np.array(
[
0.0,
10.1,
np.nan,
30.299999999999997,
40.399999999999999,
50.5,
60.599999999999994,
70.700000000000003,
80.799999999999997,
90.899999999999991,
],
dtype=np.float64,
),
'float_col': np.array(
[
np.nan,
1.1000000238418579,
2.2000000476837158,
3.2999999523162842,
4.4000000953674316,
5.5,
6.5999999046325684,
7.6999998092651367,
8.8000001907348633,
9.8999996185302734,
],
dtype='f4',
),
'int_col': np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i4'),
'month': [11, 11, 11, 11, 2, 11, 11, 11, 11, 11],
'smallint_col': np.array(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype='i2'
),
'string_col': [
'0',
'1',
None,
'double , whammy',
'4',
'5',
'6',
'7',
'8',
'9',
],
'timestamp_col': [
pd.Timestamp('2010-11-01 00:00:00'),
None,
pd.Timestamp('2010-11-01 00:02:00.100000'),
pd.Timestamp('2010-11-01 00:03:00.300000'),
                pd.Timestamp('2010-11-01 00:04:00.600000')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 7 18:22:28 2020
@author: sergiomarconi
"""
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import LabelEncoder
from keras.layers import Embedding
from keras.layers.merge import concatenate
from keras.utils import plot_model
from keras.layers import Input
from keras.layers.merge import concatenate
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTENC
from imblearn.over_sampling import ADASYN
from collections import Counter
# prepare input data
def prepare_inputs(X_train, X_test, cats = ['domainID', 'siteID']):
X_train_enc, X_test_enc = list(), list()
# label encode each column
for i in cats:
le = LabelEncoder()
le.fit(X_train[i])
# encode
train_enc = le.transform(X_train[i])
test_enc = le.transform(X_test[i])
# store
X_train_enc.append(train_enc)
X_test_enc.append(test_enc)
return X_train_enc, X_test_enc
# prepare target
def prepare_targets(y_train, y_test):
le = LabelEncoder()
le.fit(y_train)
y_train_enc = le.transform(y_train)
y_test_enc = le.transform(y_test)
return y_train_enc, y_test_enc
def categorical_encoder(cats,y, tr_lb):
import category_encoders as ce
le = LabelEncoder()
le.fit(y[tr_lb])
le = le.transform(y[tr_lb])
enc = ce.LeaveOneOutEncoder(cols=['domainID', 'siteID'])
# enc = enc.fit(cats).transform(cats)
train_enc = enc.fit_transform(cats[tr_lb],le)
return(train_enc)
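# Hedged usage sketch (illustrative only): the frames below are the train/test splits built
# further down; prepare_targets assumes every test species also appears in the training labels.
def _example_encoding(X_train, X_test, y_train, y_test):
    X_train_enc, X_test_enc = prepare_inputs(X_train, X_test, cats=['domainID', 'siteID'])
    y_train_enc, y_test_enc = prepare_targets(y_train['taxonID'], y_test['taxonID'])
    return X_train_enc, X_test_enc, y_train_enc, y_test_enc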
siteID = None #"D01"
dim_red = None #"pca"
max_threshold = 350
too_rare = False
#data = pd.read_csv("./weak_label/indir/csv/brdf_june5Ttop_hist.csv")
data = pd.read_csv("./weak_label/test_plus_30_classes.csv")
#data = pd.read_csv("./centers_august_30k.csv")
data = data.drop(['growthForm', 'stemDiameter','plantStatus', 'canopyPosition', 'nlcdClass','height', 'Easting', 'Northing','itcLongitude','itcLatitude'], axis=1)
#data = data.drop(['elevation'], axis=1)
#data = pd.read_csv("./weak_label/indir/csv/bf2_top_reflectance.csv")
#data = data.drop(columns=['species', 'genus', 'genus_id'])
#data = pd.read_csv("/Users/sergiomarconi/Documents/Data/NEON/VST/vst_top_bf1_reflectance.csv")
is_bad_genus = ["ABIES", "BETUL", "FRAXI", "MAGNO", "SALIX", "2PLANT",
"OXYDE", "HALES", "PINUS", "QUERC", "PICEA"]
is_bad_genus = data['taxonID'].isin(is_bad_genus)
if dim_red == 'pca':
#apply dimensionality reduction on data
pca = PCA(n_components = 40)
X = data.drop(columns=['individualID', 'taxonID','siteID','domainID', 'height', 'area','elevatn'])
X = pd.DataFrame(X)
attr = data[['individualID', 'taxonID','siteID','domainID', 'height', 'area','elevatn']]
data = pd.concat([attr, X], axis=1)
if dim_red == 'kld':
import sys
sys.path.append("../hsi_toolkit_py")
from dimensionality_reduction import hdr
X = data.drop(columns=['individualID', 'taxonID','siteID','domainID', 'height', 'area','elevatn'])
X = hdr.dimReduction(X, numBands = 20)
X = pd.DataFrame(X)
attr = data[['individualID', 'taxonID','siteID','domainID', 'height', 'area','elevatn']]
data = pd.concat([attr, X], axis=1)
if too_rare is True:
too_rare = ["ACFA","ACPE","AMEL","ARVIM","BEAL2","BEPA","BOSU2",
"CAAQ2","CACO15","CAOV3","CODI8","DIVI5","GUOF",
"LELE10","MAFR","NYAQ2","OSVI","PIAC",
"PIVI2","POGR4","PRAV","PRSE2","QUFA","QULA2","QULA3",
"QUPH","QUSH","ROPS","SAAL5","SILA20", "TAHE","TIAM"]
is_too_rare = data['taxonID'].isin(too_rare)
data = data[~is_too_rare]
#filter by site
if siteID is not None:
is_site = data['domainID']==siteID
data = data[is_site]
species_id = data.taxonID.unique()
# #divide X and Y
# X = data.drop(columns=['individualID', 'taxonID'])
#splin into train and test by chosing columns
train_ids = data[['individualID','siteID', 'taxonID']].drop_duplicates()
train_ids = train_ids.groupby(['siteID', 'taxonID'],
group_keys=False).apply(lambda x: x.sample(int(len(x)/ 2)+1,
random_state=1))
#embedd/encode categorical data
train_ids.to_csv("./weak_label/indir/train_ids.csv")
# split train test from individualIDs
train = data['individualID'].isin(train_ids['individualID'])
test = data[~data["individualID"].isin(train_ids['individualID'])]
X_train = data.drop(columns=['individualID', 'taxonID'])[train]
X_test = data.drop(columns=['individualID', 'taxonID'])[~train]
y_train = data[['taxonID']][train]
y_test = data[['individualID','taxonID']][~train]
y_test.to_csv("./weak_label/indir/y_test.csv")
y = data[['taxonID']]
#encode categorical values using an LOO-Encoder and associating only 1 value to each
cats = data[['domainID', 'siteID']]
cat_encoder = categorical_encoder(cats,y['taxonID'], train)
cat_encoder = pd.DataFrame(np.hstack([cats[train],cat_encoder])).drop_duplicates()
cat_encoder.columns = ['domainID','siteID', 'domainE', 'siteE']
cat_encoder = cat_encoder.assign(domainE=pd.to_numeric(cat_encoder['domainE'], errors='coerce'))
cat_encoder = cat_encoder.assign(siteE=pd.to_numeric(cat_encoder['siteE'], errors='coerce'))
site_encode = cat_encoder.groupby('siteID')['siteE'].mean()
domain_encode = cat_encoder.groupby('domainID')['domainE'].mean()
# oversample using SMOTENC in order not to loose the categorical effects
#get relatie frequency of each class
ratios_for_each = Counter(y_train.taxonID)
ratios_for_each = pd.DataFrame.from_dict(ratios_for_each, orient='index').reset_index()
ratios_for_each.iloc[:,1] = ratios_for_each.iloc[:,1]
#max_threshold max limit is the most frequent tree
max_threshold = min(max(ratios_for_each.iloc[:,1])-1, max_threshold)
#get classes with less than max, and oversample them
thres_species = ratios_for_each.iloc[:,1] > max_threshold
thres_species = ratios_for_each[thres_species].iloc[:,0]
thres_species = y_train.taxonID.isin(thres_species)
x_tmp = X_train[thres_species]
y_tmp = y_train[thres_species]
#undersample
from imblearn.under_sampling import RandomUnderSampler
from imblearn.under_sampling import NeighbourhoodCleaningRule
if len(np.unique(y_tmp)) > 1:
#rus = ClusterCentroids(random_state=0)
rus = RandomUnderSampler(random_state=0)
X_resampled, y_resampled = rus.fit_resample(x_tmp.to_numpy(), y_tmp.to_numpy())
X_resampled = pd.DataFrame(X_resampled)
y_resampled = pd.DataFrame(y_resampled)
else:
import random
rindx = random.sample(range(0, len(y_tmp)), max_threshold)
X_resampled = x_tmp.iloc[rindx,:]
y_resampled = y_tmp.iloc[rindx,:]
X_resampled = pd.DataFrame(X_resampled)
    y_resampled = pd.DataFrame(y_resampled)
import os
import glob
import pickle
import re
# Our numerical workhorses
import numpy as np
import pandas as pd
# Import the project utils
import sys
sys.path.insert(0, '../')
import NB_sortseq_utils as utils
# Import matplotlib stuff for plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
# Seaborn, useful for graphics
import seaborn as sns
sns.set_palette("deep", color_codes=True)
utils.set_plotting_style_MS()
#===============================================================================
# Set output directory
#===============================================================================
output = 'output_figs/'
#===============================================================================
# Read the data
#===============================================================================
datadir = '../mass_spec/*/'
files = glob.glob(datadir + '*.csv')
df = pd.DataFrame()
for f in enumerate(files):
# if "titration" in f[1]:
# continue
# grab only the lac data.
if "lac" not in f[1]:
continue
    df_temp = pd.DataFrame()
'''Compare different estimators on public datasets
Code modified from https://github.com/tmadl/sklearn-random-bits-forest
'''
import argparse
import os
import pickle as pkl
import time
import warnings
from collections import defaultdict, OrderedDict
from typing import Any, Callable, List, Dict, Tuple
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import accuracy_score, roc_auc_score, average_precision_score, make_scorer
from sklearn.model_selection import KFold, train_test_split, cross_validate
from tqdm import tqdm
from experiments.config.config_general import DATASETS
from experiments.config.util import get_estimators_for_dataset, get_ensembles_for_dataset
from experiments.util import Model, MODEL_COMPARISON_PATH, get_clean_dataset, get_best_accuracy, remove_x_axis_duplicates
warnings.filterwarnings("ignore", message="Bins whose width")
def get_complexity(estimator: BaseEstimator) -> float:
if isinstance(estimator, (RandomForestClassifier, GradientBoostingClassifier)):
complexity = 0
for tree in estimator.estimators_:
if type(tree) is np.ndarray:
tree = tree[0]
complexity += (2 ** tree.get_depth()) * tree.get_depth()
return complexity
else:
return estimator.complexity_
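# Hedged illustration (not part of the original benchmark code): the score above
# weights every tree in an ensemble by (2 ** depth) * depth, so three trees of
# depth 2 give 3 * (2 ** 2) * 2 = 24. The helper below just exercises that
# formula on a small fitted forest; the dataset and parameters are made up.
def _example_forest_complexity():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=50, random_state=0)
    forest = RandomForestClassifier(n_estimators=3, max_depth=2, random_state=0).fit(X, y)
    # equivalent to sum((2 ** t.get_depth()) * t.get_depth() for t in forest.estimators_)
    return get_complexity(forest)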
def compute_meta_auc(result_data: pd.DataFrame,
prefix: str = '',
low_complexity_cutoff: int = 30,
max_start_complexity: int = 10) -> Tuple[pd.DataFrame, Tuple[float]]:
# LOW_COMPLEXITY_CUTOFF: complexity score under which a model is considered interpretable
# MAX_START_COMPLEXITY: min complexity of curves included in the AUC-of-AUC comparison must be below this value
# x_column = f'{prefix}_mean_complexity'
x_column = f'mean_complexity'
compute_columns = result_data.columns[result_data.columns.str.contains('mean')]
estimators = np.unique(result_data.index)
xs = np.empty(len(estimators), dtype=object)
ys = xs.copy()
for i, est in enumerate(estimators):
est_result_df = result_data[result_data.index.str.fullmatch(est)]
complexities_unsorted = est_result_df[x_column]
complexity_sort_indices = complexities_unsorted.argsort()
complexities = complexities_unsorted[complexity_sort_indices]
roc_aucs = est_result_df.iloc[complexity_sort_indices][compute_columns]
xs[i] = complexities.values
ys[i] = roc_aucs.values
# filter out curves which start too complex
start_under_10 = list(map(lambda x: min(x) < max_start_complexity, xs))
# find overlapping complexity region for roc-of-roc comparison
meta_auc_lb = max([x[0] for x in xs])
endpts = np.array([x[-1] for x in xs])
meta_auc_ub = min(endpts[endpts > meta_auc_lb])
meta_auc_ub = min(meta_auc_ub, low_complexity_cutoff)
# handle non-overlapping curves
endpt_after_lb = endpts > meta_auc_lb
eligible = start_under_10 & endpt_after_lb
# compute AUC of interpolated curves in overlap region
    meta_aucs = defaultdict(list)
for i in range(len(xs)):
for c, col in enumerate(compute_columns):
if eligible[i]:
x, y = remove_x_axis_duplicates(xs[i], ys[i][:, c])
f_curve = interp1d(x, y)
x_interp = np.linspace(meta_auc_lb, meta_auc_ub, 100)
y_interp = f_curve(x_interp)
auc_value = np.trapz(y_interp, x=x_interp)
else:
auc_value = 0
meta_aucs[col + '_auc'].append(auc_value)
meta_auc_df = | pd.DataFrame(meta_aucs, index=estimators) | pandas.DataFrame |
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
#Reading the files and combining the data for plotting
lcg_data=pd.read_csv('data_lcg.txt', delim_whitespace=True, header=None)
lcg_data.columns=["k", "C(k)"]
lcg_data=lcg_data.assign(RNG="LCG")
pm_data= | pd.read_csv('data_pm.txt', delim_whitespace=True, header=None) | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
useful line processing tips for daily quick need
Created on Sun Oct 11 20:52:26 2020
@author: wuulong
"""
import os
import os.path
import zipfile
import re
from urllib.parse import quote
import pandas as pd
import requests
import json
import time
#%% load file contents to lines
def file_to_lines(pathname):
fo = open(pathname, "r")
#lines = fo.readlines()
lines = fo.read().splitlines()
fo.close()
return lines
def url_get(filename,url):
"""
    download url to a local file; the request is skipped if the file already exists
"""
print("filename=%s,url=%s" %(filename,url))
if not os.path.isfile(filename):
r = requests.get(url, params = {})
open(filename, 'wb').write(r.content)
def load_json(filename,case_id=0):
"""
    load a json file and convert it to a pandas DataFrame
    hardcoded: case_id=1 extracts the records nested under Data/FileList
"""
with open(filename, 'r') as json_file:
data_head = json.load(json_file)
if case_id==1: # DrainageReport
            # may need to change depending on the data schema
data = data_head['Data']['FileList']
else:
data = data_head
if len(data)>0:
cols = data[0].keys()
else:
return None
out = []
for row in data:
item = []
for c in cols:
item.append(row.get(c, {}))
out.append(item)
return pd.DataFrame(out, columns=cols)
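# Hedged usage sketch (the file name and URL below are placeholders, not taken
# from the source): url_get() caches the payload on disk and skips the request
# if the file already exists; load_json() then flattens the records into a
# DataFrame. Pass case_id=1 when the records are nested under Data/FileList.
def _example_load_json_usage():
    url_get("output/example.json", "https://example.org/api?format=json")
    return load_json("output/example.json", case_id=0)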
def DrainageReport():
"""
generate drainage report
"""
page = 1
#global df_all
df_all = None
"""
All
https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&coun=&key=&limit=100
中央管區域排水
https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&level=%u4E2D%u592E%u7BA1%u5340%u57DF%u6392%u6C34&coun=&key=&limit=100
直轄市管區域排水
https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&level=%u76F4%u8F44%u5E02%u7BA1%u5340%u57DF%u6392%u6C34&coun=&key=&limit=100
縣市管區域排水
https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&level=%u7E23%u5E02%u7BA1%u5340%u57DF%u6392%u6C34&coun=&key=&limit=100
市區排水
https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&level=%u5E02%u5340%u6392%u6C34&coun=&key=&limit=100
"""
while True:
urlbase = "https://rdi-123.wrap.gov.tw/Integration_WRPI_Drainage/FuncModule/Drainage_2018/DrainageReport.ashx?cmd=public&exist=1&coun=&key=&limit=100"
url = "%s&page=%i" %(urlbase,page)
file_datestr = "output/DrainageReport"
filename = "%s_%04i.json" %(file_datestr,page)
cont = True
while cont:
try:
url_get(filename, url)
df = load_json(filename,1)
#df = pd.read_csv(filename)
cont = False
except:
print("Exception when process %s, retrying after 60s" %(filename))
if os.path.isfile(filename):
os.remove(filename)
time.sleep(60)
if page>1:
df_all = | pd.concat([df_all,df]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 10:02:26 2019
@author: meli
"""
import pandas as pd
import numpy as np
from models_ml import auto_model
# In[laoding dataset]
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer()
y = np.asarray(data.target)
X = np.asarray(data.data)
X_pred = np.asarray(data.data)
sub = | pd.DataFrame(X) | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
from predict_by_model import *
def GetStrn(strg):
s1 = strg.split(";")[-1]
s2 = "_".join(s1.split("__")[1:])
s3 = s2.split(".")[0]
return s3
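# Minimal check of GetStrn (the taxonomy string below is made up, not from the
# Lorikeet data): the last ';'-separated field is split on '__', re-joined with
# '_', and anything after the first '.' is dropped.
def _example_GetStrn():
    assert GetStrn("d__Bacteria;s__Lactobacillus__reuteri.fna") == "Lactobacillus_reuteri"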
if __name__ == "__main__":
try:
os.mkdir("LorikeetExperiments")
except:
pass
birdData = | pd.read_excel("LorikeetData.xlsx") | pandas.read_excel |
import pandas as pd
from aneris.boundary import Structure
class SeriesData(Structure):
'''Structure represented in a series of some sort'''
def get_data(self, raw, meta_data):
series = | pd.Series(raw) | pandas.Series |
import numpy as np
import pandas as pd
import re
import os
import natsort
import pickle
from sklearn.metrics.pairwise import cosine_similarity
from CONSTANTS import *
from matplotlib import pyplot as plt
import nltk
"""
This method, if not yet done, adds the docs to the word-dictionary, computes the cosine-similarity matrix of the dictionary
and returns a specified number of nearest docs/words to the given doc/word according to the cosine-similarity
Depending on the specification of the user, only the closest words kann be returned or only the closest documents
Additionally the user can return to closest documents, if any token is given (which can be any word as well), which enables a semantic search
Args:
word_doc (string): a word or doc of which the "closest" words /docs should be computed
path_to_docs (string): path to the docs (required to extract the doc-names)
embeddings (matrix): contains all the vectors for all words and docs
new_dict (dictionary or None): placeholder for the new dictionary to be created (which contains words and doc-names)
dict_for_update (dictionary): dictionary containing all words
num_of_close_words: the number of nearest words/docs that should be returned
docs_only (boolean): True, if only the nearest docs should be returned
token_only (boolean): True, if only the nearest words should be returned
Returns:
    1. A pandas Series containing the nearest words/docs and their corresponding cosine-similarity
    2. A list of the used books (document titles)
    3. A DataFrame containing all pairwise cosine-similarities
"""
def return_the_closest_words_or_docs(word_doc, path_to_docs, embeddings, new_dict, dict_for_update,
num_of_close_words, docs_only, token_only, GPE_only, GPE_entities,
PERSON_only, PERSON_entities, google_dict):
if (new_dict is None): # if this condition is true, the docs have to be added to the dictionary
files_in_directory_sorted = natsort.natsorted(os.listdir(path_to_docs))
docs_sorted = [(Book_Titles[int(re.findall("\d+", i)[0])], str(index)) for index, i in enumerate(files_in_directory_sorted)]
titles, _ = zip(*docs_sorted)
docs_sorted = [(str(titles[index])+"_PART_2",index) if (index>0 and titles[index] == titles[index-1]) else (titles[index],index) for index,i in enumerate(docs_sorted)] # if titles[index]==titles[index-1]] #
doc_names,_ = zip(*docs_sorted)
new_dict = {key: value for (key, value) in docs_sorted}
if (google_dict==False):
new_dict.update(dict_for_update)
vector_dataframe = | pd.DataFrame(embeddings) | pandas.DataFrame |
import matplotlib.pyplot as plt
import matplotlib.cm
import pandas as pd
import numpy as np
import itertools
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from matplotlib.colors import Normalize
from matplotlib.colorbar import ColorbarBase
from matplotlib import gridspec
import matplotlib.patheffects as PathEffects
PRACTICE_COLOR = '#22447A'
RECRUIT_COLOR = '#2EA9B0'
DEACTIVATED_COUNTY_COLOR = '#FBDFDB'
ACTIVATED_COUNTY_COLOR = '#EA5D4E'
def darken_color(color, amount=0.7):
"""
    Darkens (amount < 1) or lightens (amount > 1) the given color by scaling
    its luminosity by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.
    Examples:
    >> darken_color('g', 0.3)
    >> darken_color('#F034A3', 0.6)
    >> darken_color((.3,.55,.1), 0.5)
"""
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
except:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], amount * c[1], c[2])
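# Hedged example (not part of the original script): scaling the luminosity by
# 0.7 darkens a colour while a factor of 2 lightens it; print_bar() below uses
# darken_color(color, 2) for the upper segment of each stacked bar.
def _example_darken_color():
    darker = darken_color(PRACTICE_COLOR, 0.7)
    lighter = darken_color(PRACTICE_COLOR, 2)
    return darker, lighter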
def print_map(df_genvasc_practices, df_potential_practices):
ax_map = plt.subplot(gs[2])
# Map bounding box
# westlimit=-1.5044; southlimit=52.1023; eastlimit=-0.3151; northlimit=52.8302
llcrnrlon = -1.6
llcrnrlat = 51.95
urcrnrlon = 0.6
urcrnrlat = 53.5
map = Basemap(
resolution='c',
projection='merc',
llcrnrlon=llcrnrlon,
llcrnrlat=llcrnrlat,
urcrnrlon=urcrnrlon,
urcrnrlat=urcrnrlat
)
map.readshapefile('data/English Ceremonial Counties', 'counties', drawbounds=False)
activated_counties = [
'Leicestershire',
'Northamptonshire',
'Rutland',
]
deactivated_counties = [
'Cambridgeshire',
'Lincolnshire',
]
patches = [Polygon(np.array(shape), True) for info, shape in zip(map.counties_info, map.counties) if info['NAME'] in activated_counties]
pc = PatchCollection(patches, zorder=2, facecolors=[ACTIVATED_COUNTY_COLOR], edgecolor='#FFFFFF', linewidths=1.)
ax_map.add_collection(pc)
patches = [Polygon(np.array(shape), True) for info, shape in zip(map.counties_info, map.counties) if info['NAME'] in deactivated_counties]
pc = PatchCollection(patches, zorder=2, facecolors=[DEACTIVATED_COUNTY_COLOR], edgecolor='#FFFFFF', linewidths=1.)
ax_map.add_collection(pc)
df_towns = pd.read_csv('data/towns_geo.csv')
df_towns = df_towns[df_towns.name.isin([
'leicester',
'loughborough',
'northampton',
'kettering',
'cambridge',
'peterborough',
'lincoln',
'grantham',
'boston',
'oakham',
])]
for index, row in df_towns.iterrows():
x, y = map(row['x'], row['y'])
txt = ax_map.text(
x,
y,
row['name'].title().replace(' ', '\n'),
fontsize=26,
horizontalalignment='left',
verticalalignment='top',
color='#222222',
weight='bold'
)
txt.set_path_effects([PathEffects.withStroke(linewidth=6, foreground='#FFFFFF')])
for index, row in pd.concat([df_genvasc_practices, df_potential_practices]).iterrows():
x, y = map(row['x'], row['y'])
map.plot(x, y, marker='.', color=PRACTICE_COLOR, markersize=15)
def print_bar(title, first, second, color, max, step):
plt.title(title, fontsize=26, y=1.02)
plt.bar((title), first, align='center', color=color)
plt.bar((title), second, bottom=first, align='center', color=darken_color(color, 2))
plt.yticks(np.arange(0, step * round(max/step), step=step), fontsize=20)
plt.xticks([])
def recruits():
ax_recruits = plt.subplot(gs[1])
df_recruitment = pd.read_csv('data/genvasc_recruitment.csv')
df_recruitment = df_recruitment.set_index('year')
recruit_count = df_recruitment.loc[year]['cum_recruited']
potential_recruits = 0
if include_potential:
potential_linc_cam_in_2_years = 8000
actual_llr_northants_in_last_year = 8000
potential_recruits += potential_linc_cam_in_2_years + (actual_llr_northants_in_last_year * 2)
print_bar('Recruits', recruit_count, potential_recruits, RECRUIT_COLOR, 75_000, 10_000)
def practices(df_genvasc_practices, df_potential_practices):
ax_practices = plt.subplot(gs[0])
practice_count = len(df_genvasc_practices.index)
potential_practice_count = (0.7 * len(df_potential_practices.index))
print_bar('Practices', practice_count, potential_practice_count, PRACTICE_COLOR, 600, 100)
year = 2018
include_potential = False
output_filename = 'genvasc_2018'
plt.rcParams["font.family"] = "lato"
fig = plt.figure(figsize=(16, 12))
gs = gridspec.GridSpec(
1,
3,
width_ratios=[1, 1,10]
)
df_genvasc_practices = pd.read_csv('data/genvasc_practices_geo.csv')
df_potential_practices = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
    # Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
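# Hedged illustration (not part of the test suite): _make_func_use_binop1('+')
# builds source text for a two-argument function equivalent to
# `lambda A, B: A + B`; the parameterised operator tests below compile such
# generated functions with hpat.jit and compare them against pandas.
def _example_generated_binop():
    add_impl = _make_func_use_binop1('+')
    return add_impl(pd.Series([1, 2]), pd.Series([3, 4]))  # -> pd.Series([4, 6])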
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
        A = np.arange(n)**2.0  # TODO: use 2 to test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Old style Series.sum() does not support parameters")
def test_series_sum_skipna_false(self):
def test_impl(S):
return S.sum(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(np.isnan(hpat_func(S)), np.isnan(test_impl(S)))
@unittest.skipIf(not hpat.config.config_pipeline_hpat_default,
"Series.sum() operator + is not implemented yet for Numba")
def test_series_sum2(self):
def test_impl(S):
return (S + S).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_prod(self):
def test_impl(S, skipna):
return S.prod(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
S = pd.Series(data)
for skipna_var in [True, False]:
actual = hpat_func(S, skipna=skipna_var)
expected = test_impl(S, skipna=skipna_var)
if np.isnan(actual) or np.isnan(expected):
                    # cannot compare NaN != NaN directly
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_prod_skipna_default(self):
def test_impl(S):
return S.prod()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2, 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_count1(self):
def test_impl(S):
return S.count()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series([np.nan, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(['aa', 'bb', np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_mean(self):
def test_impl(S):
return S.mean()
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for data in data_samples:
with self.subTest(data=data):
S = pd.Series(data)
actual = hpat_func(S)
expected = test_impl(S)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.mean() any parameters unsupported")
def test_series_mean_skipna(self):
def test_impl(S, skipna):
return S.mean(skipna=skipna)
hpat_func = hpat.jit(test_impl)
data_samples = [
[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 3, 2.2, 1, 2],
[6, 6, np.nan, 2, np.nan, 1, 3, 3, np.inf, 2, 1, 2, np.inf],
[1.1, 0.3, np.nan, 1.0, np.inf, 0.3, 2.1, np.nan, 2.2, np.inf],
[1.1, 0.3, np.nan, 1, np.inf, 0, 1.1, np.nan, 2.2, np.inf, 2, 2],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.inf],
]
for skipna in [True, False]:
for data in data_samples:
S = pd.Series(data)
actual = hpat_func(S, skipna)
expected = test_impl(S, skipna)
if np.isnan(actual) or np.isnan(expected):
self.assertEqual(np.isnan(actual), np.isnan(expected))
else:
self.assertEqual(actual, expected)
def test_series_var1(self):
def test_impl(S):
return S.var()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_min(self):
def test_impl(S):
return S.min()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.min() any parameters unsupported")
def test_series_min_param(self):
def test_impl(S, param_skipna):
return S.min(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_max(self):
def test_impl(S):
return S.max()
hpat_func = hpat.jit(test_impl)
# TODO type_min/type_max
for input_data in [[np.nan, 2., np.nan, 3., np.inf, 1, -1000],
[8, 31, 1123, -1024],
[2., 3., 1, -1000, np.inf]]:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default, "Series.max() any parameters unsupported")
def test_series_max_param(self):
def test_impl(S, param_skipna):
return S.max(skipna=param_skipna)
hpat_func = hpat.jit(test_impl)
for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
([2., 3., 1, np.inf, -1000], False)]:
S = pd.Series(input_data)
result_ref = test_impl(S, param_skipna)
result = hpat_func(S, param_skipna)
self.assertEqual(result, result_ref)
def test_series_value_counts(self):
def test_impl(S):
return S.value_counts()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['AA', 'BB', 'C', 'AA', 'C', 'AA'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_dist_input1(self):
'''Verify distribution of a Series without index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_dist_input2(self):
'''Verify distribution of a Series with integer index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), 1 + np.arange(n))
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
@unittest.skip("Passed if run single")
def test_series_dist_input3(self):
'''Verify distribution of a Series with string index'''
def test_impl(S):
return S.max()
hpat_func = hpat.jit(distributed={'S'})(test_impl)
n = 111
S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
start, end = get_start_end(n)
self.assertEqual(hpat_func(S[start:end]), test_impl(S))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
def test_series_tuple_input1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
s_tup = (S, 1, S2)
self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
@unittest.skip("pending handling of build_tuple in dist pass")
def test_series_tuple_input_dist1(self):
def test_impl(s_tup):
return s_tup[0].max()
hpat_func = hpat.jit(locals={'s_tup:input': 'distributed'})(test_impl)
n = 111
S = pd.Series(np.arange(n))
S2 = pd.Series(np.arange(n) + 1.0)
start, end = get_start_end(n)
s_tup = (S, 1, S2)
h_s_tup = (S[start:end], 1, S2[start:end])
self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
def test_series_rolling1(self):
def test_impl(S):
return S.rolling(3).sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_concat1(self):
def test_impl(S1, S2):
return pd.concat([S1, S2]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6., 7.])
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_map1(self):
def test_impl(S):
return S.map(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_global1(self):
def test_impl(S):
return S.map(lambda a: a + GLOBAL_VAL)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup1(self):
def test_impl(S):
return S.map(lambda a: (a, 2 * a))
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_map_tup_map1(self):
def test_impl(S):
A = S.map(lambda a: (a, 2 * a))
return A.map(lambda a: a[1])
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_combine(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_float3264(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([np.float64(1), np.float64(2),
np.float64(3), np.float64(4), np.float64(5)])
S2 = pd.Series([np.float32(1), np.float32(2),
np.float32(3), np.float32(4), np.float32(5)])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_assert1(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3])
S2 = pd.Series([6., 21., 3., 5.])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_assert2(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6., 21., 3., 5.])
S2 = pd.Series([1, 2, 3])
with self.assertRaises(AssertionError):
hpat_func(S1, S2)
def test_series_combine_integer(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 16)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 3, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_different_types(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
S2 = pd.Series([1, 2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_integer_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1, 2, 3, 4, 5])
S2 = pd.Series([6, 21, 17, -5, 4])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5.])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_combine_value_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b, 1237.56)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2., 3., 4., 5.])
S2 = pd.Series([6.0, 21., 3.6, 5., 0.0])
pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_apply1(self):
def test_impl(S):
return S.apply(lambda a: 2 * a)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2., 3., 4., 5.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_abs1(self):
def test_impl(S):
return S.abs()
hpat_func = hpat.jit(test_impl)
S = pd.Series([np.nan, -2., 3., 0.5E-01, 0xFF, 0o7, 0b101])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_cov1(self):
def test_impl(S1, S2):
return S1.cov(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_corr1(self):
def test_impl(S1, S2):
return S1.corr(S2)
hpat_func = hpat.jit(test_impl)
for pair in _cov_corr_series:
S1, S2 = pair
np.testing.assert_almost_equal(
hpat_func(S1, S2), test_impl(S1, S2),
err_msg='S1={}\nS2={}'.format(S1, S2))
def test_series_str_len1(self):
def test_impl(S):
return S.str.len()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'abc', 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str2str(self):
str2str_methods = ('capitalize', 'lower', 'lstrip', 'rstrip',
'strip', 'swapcase', 'title', 'upper')
for method in str2str_methods:
func_text = "def test_impl(S):\n"
func_text += " return S.str.{}()\n".format(method)
test_impl = _make_func_from_text(func_text)
hpat_func = hpat.jit(test_impl)
S = pd.Series([' \tbbCD\t ', 'ABC', ' mCDm\t', 'abc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_append1(self):
def test_impl(S, other):
return S.append(other).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
# Test single series
np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
def test_series_append2(self):
def test_impl(S1, S2, S3):
return S1.append([S2, S3]).values
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([-2., 3., 9.1])
S2 = pd.Series([-2., 5.0])
S3 = pd.Series([1.0])
# Test series tuple
np.testing.assert_array_equal(hpat_func(S1, S2, S3),
test_impl(S1, S2, S3))
def test_series_isin_list1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = [1, 2, 5, 7, 8]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = [1., 2., 5., 7., 8.]
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_list3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'q', 'w', 'c', 'd', 'e', 'r'])
values = ['a', 'q', 'c', 'd', 'e']
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set1(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
values = {1, 2, 5, 7, 8}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isin_set2(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
values = {1., 2., 5., 7., 8.}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
@unittest.skip('TODO: requires hashable unicode strings in Numba')
def test_series_isin_set3(self):
def test_impl(S, values):
return S.isin(values)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['a', 'b', 'c', 'd', 'e'] * 2)
values = {'b', 'c', 'e'}
pd.testing.assert_series_equal(hpat_func(S, values), test_impl(S, values))
def test_series_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3., np.inf])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull1(self):
def test_impl(S):
return S.isnull()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_isnull_full(self):
def test_impl(series):
return series.isnull()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_numeric + [test_global_input_data_unicode_kind4]:
series = pd.Series(data * 3)
ref_result = test_impl(series)
jit_result = hpat_func(series)
pd.testing.assert_series_equal(ref_result, jit_result)
def test_series_notna1(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_notna_noidx_float(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_int(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_notna_noidx_num(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_notna_noidx_str(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
result_ref = test_impl(S)
result_jit = hpat_func(S)
pd.testing.assert_series_equal(result_jit, result_ref)
def test_series_str_notna(self):
def test_impl(S):
return S.notna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_str_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', None, 'c', 'cccd'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different')
def test_series_dt_isna1(self):
def test_impl(S):
return S.isna()
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_nlargest1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_default1(self):
def test_impl(S):
return S.nlargest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_nan1(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nlargest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_str(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nlargest_index_int(self):
def test_impl(S):
return S.nlargest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([73, 21, 10005, 5, 1], index=[2, 3, 4, 5, 6])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_default1(self):
def test_impl(S):
return S.nsmallest()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_nan1(self):
def test_impl(S):
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, np.nan, 3.0, 2.0, np.nan, 4.0])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_nsmallest_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.nsmallest(4)
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func().values, test_impl().values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_str(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_nsmallest_index_int(self):
def test_impl(S):
return S.nsmallest(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([41, 32, 33, 4, 5], index=[1, 2, 3, 4, 5])
np.testing.assert_array_equal(hpat_func(S).values, test_impl(S).values)
def test_series_head1(self):
def test_impl(S):
return S.head(4)
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_default1(self):
'''Verifies default head method for non-distributed pass of Series with no index'''
def test_impl(S):
return S.head()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_index1(self):
'''Verifies head method for Series with integer index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index2(self):
'''Verifies head method for Series with string index created inside jitted function'''
def test_impl():
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
return S.head(3)
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_head_index3(self):
'''Verifies head method for non-distributed pass of Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip("Passed if run single")
def test_series_head_index4(self):
'''Verifies head method for non-distributed pass of Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(test_impl)
S = pd.Series([6, 9, 2, 4, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_head_parallel1(self):
'''Verifies head method for distributed Series with string data and no index'''
def test_impl(S):
return S.head(7)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
        # need to test different lengths, as head's size is fixed and the implementation
        # depends on how the per-processor data size relates to the output data size
for n in range(1, 5):
S = pd.Series(['a', 'ab', 'abc', 'c', 'f', 'hh', ''] * n)
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_index_parallel1(self):
'''Verifies head method for distributed Series with integer index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], [8, 1, 6, 0, 9, 1, 3])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip("Passed if run single")
def test_series_head_index_parallel2(self):
'''Verifies head method for distributed Series with string index'''
def test_impl(S):
return S.head(3)
hpat_func = hpat.jit(distributed={'S'})(test_impl)
S = pd.Series([6, 9, 2, 3, 6, 4, 5], ['a', 'ab', 'abc', 'c', 'f', 'hh', ''])
start, end = get_start_end(len(S))
pd.testing.assert_series_equal(hpat_func(S[start:end]), test_impl(S))
self.assertTrue(count_array_OneDs() > 0)
def test_series_head_noidx_float(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_float64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_int(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_integer64:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Need fix test_global_input_data_integer64")
def test_series_head_noidx_num(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
for input_data in test_global_input_data_numeric:
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Old implementation not work with n negative and data str")
def test_series_head_noidx_str(self):
def test_impl(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
input_data = test_global_input_data_unicode_kind4
S = pd.Series(input_data)
for n in [-1, 0, 2, 3]:
result_ref = test_impl(S, n)
result_jit = hpat_func(S, n)
pd.testing.assert_series_equal(result_jit, result_ref)
@unittest.skip("Broke another three tests")
def test_series_head_idx(self):
def test_impl(S):
return S.head()
def test_impl_param(S, n):
return S.head(n)
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
['as', 'b', 'abb', 'sss', 'ytr65', '', 'qw', 'a', 'b'],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
pd.testing.assert_series_equal(result, result_ref)
hpat_func_param1 = hpat.jit(test_impl_param)
for param1 in [1, 3, 7]:
result_param1_ref = test_impl_param(S, param1)
result_param1 = hpat_func_param1(S, param1)
pd.testing.assert_series_equal(result_param1, result_param1_ref)
def test_series_median1(self):
'''Verifies median implementation for float and integer series of random data'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
m = 100
np.random.seed(0)
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
# odd size
m = 101
S = pd.Series(np.random.randint(-30, 30, m))
self.assertEqual(hpat_func(S), test_impl(S))
S = pd.Series(np.random.ranf(m))
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"BUG: old-style median implementation doesn't filter NaNs")
def test_series_median_skipna_default1(self):
'''Verifies median implementation with default skipna=True argument on a series with NA values'''
def test_impl(S):
return S.median()
hpat_func = hpat.jit(test_impl)
S = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
"Skipna argument is not supported in old-style")
def test_series_median_skipna_false1(self):
'''Verifies median implementation with skipna=False on a series with NA values'''
def test_impl(S):
return S.median(skipna=False)
hpat_func = hpat.jit(test_impl)
# np.inf is not NaN, so verify that a correct number is returned
S1 = pd.Series([2., 3., 5., np.inf, 5., 6., 7.])
self.assertEqual(hpat_func(S1), test_impl(S1))
        # TODO: both return values are 'nan', but HPAT's is not np.nan, hence checking with
        # assertIs() doesn't work - check whether it's Numba related
S2 = pd.Series([2., 3., 5., np.nan, 5., 6., 7.])
self.assertEqual(np.isnan(hpat_func(S2)), np.isnan(test_impl(S2)))
def test_series_median_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.median()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
def test_series_argsort_parallel(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.argsort().values
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_idxmin1(self):
def test_impl(A):
return A.idxmin()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_idxmin_str(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmin_str_idx(self):
def test_impl(S):
return S.idxmin(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_no(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_int(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3], [4, 45, 14])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmin_noidx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmin_idx(self):
def test_impl(S):
return S.idxmin()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, -np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_idxmax1(self):
def test_impl(A):
return A.idxmax()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
@unittest.skip("Skipna is not implemented")
def test_series_idxmax_str_idx(self):
def test_impl(S):
return S.idxmax(skipna=False)
hpat_func = hpat.jit(test_impl)
S = pd.Series([8, 6, 34, np.nan], ['a', 'ab', 'abc', 'c'])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_idxmax_noidx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.inf, np.nan, np.inf, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
S = pd.Series(input_data)
result_ref = test_impl(S)
result = hpat_func(S)
self.assertEqual(result, result_ref)
def test_series_idxmax_idx(self):
def test_impl(S):
return S.idxmax()
hpat_func = hpat.jit(test_impl)
data_test = [[6, 6, 2, 1, 3, 3, 2, 1, 2],
[1.1, 0.3, 2.1, 1, 3, 0.3, 2.1, 1.1, 2.2],
[6, 6.1, 2.2, 1, 3, 0, 2.2, 1, 2],
[6, 6, 2, 1, 3, np.nan, np.nan, np.nan, np.nan],
[3., 5.3, np.nan, np.nan, np.inf, np.inf, 4.4, 3.7, 8.9]
]
for input_data in data_test:
for index_data in data_test:
S = pd.Series(input_data, index_data)
result_ref = test_impl(S)
result = hpat_func(S)
if np.isnan(result) or np.isnan(result_ref):
self.assertEqual(np.isnan(result), np.isnan(result_ref))
else:
self.assertEqual(result, result_ref)
def test_series_sort_values1(self):
def test_impl(A):
return A.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
S = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_sort_values_index1(self):
def test_impl(A, B):
S = pd.Series(A, B)
return S.sort_values()
hpat_func = hpat.jit(test_impl)
n = 11
np.random.seed(0)
# TODO: support passing Series with Index
# S = pd.Series(np.random.ranf(n), np.random.randint(0, 100, n))
A = np.random.ranf(n)
B = np.random.ranf(n)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
def test_series_sort_values_parallel1(self):
# create `kde.parquet` file
ParquetGenerator.gen_kde_pq()
def test_impl():
df = pq.read_table('kde.parquet').to_pandas()
S = df.points
return S.sort_values()
hpat_func = hpat.jit(test_impl)
np.testing.assert_array_equal(hpat_func(), test_impl())
def test_series_shift(self):
def pyfunc():
series = pd.Series([1.0, np.nan, -1.0, 0.0, 5e-324])
return series.shift()
cfunc = hpat.jit(pyfunc)
pd.testing.assert_series_equal(cfunc(), pyfunc())
def test_series_shift_unboxing(self):
def pyfunc(series):
return series.shift()
cfunc = hpat.jit(pyfunc)
for data in test_global_input_data_float64:
series = pd.Series(data)
pd.testing.assert_series_equal(cfunc(series), pyfunc(series))
def test_series_shift_full(self):
def pyfunc(series, periods, freq, axis, fill_value):
return series.shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
cfunc = hpat.jit(pyfunc)
freq = None
axis = 0
for data in test_global_input_data_float64:
series = | pd.Series(data) | pandas.Series |
import argparse
import csv
import numpy as np
import os.path
from os import path
import pandas as pd
pd.options.mode.chained_assignment = None
import sys
# from model_utils import get_loader, change_size
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# warnings.filterwarnings("ignore", category=SettingWithCopyWarning)
def load_ppmi_baseline_data(p_ids, use_healthy=True):
# if not os.path.exists('/afs/REDACTED.REDACTED.edu/group/REDACTED/datasets/ppmi/visit_feature_inputs_asof_2019Jan24_using_CMEDTM/'):
# print('Error: PPMI path does not exist. Are you on the wrong machine? PPMI experiments can only be run on Chronos/Kratos.')
# return
ppmi_healthy_csv = '~/chf-github/data/HC_baseline.csv'
ppmi_csv = '~/chf-github/data/PD_baseline.csv'
ppmi = pd.read_csv(ppmi_csv)
healthy = pd.read_csv(ppmi_healthy_csv)
baseline_cols = [
'MALE',
'HISPLAT',
'RAWHITE',
'RAASIAN',
'RABLACK',
'RAINDALS',
'RAHAWOPI',
'RANOS',
'BIOMOMPD',
'BIODADPD',
'FULSIBPD',
'HAFSIBPD',
'MAGPARPD',
'PAGPARPD',
'MATAUPD',
'PATAUPD',
'KIDSPD',
'EDUCYRS',
'RIGHT_HANDED',
'LEFT_HANDED',
'UPSITBK1',
'UPSITBK2',
'UPSITBK3',
'UPSITBK4',
'UPSIT'
]
if use_healthy:
df = | pd.concat([healthy, ppmi]) | pandas.concat |
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
# script: getMutationCounts_overall_and_GOI.py
# author: Lincoln
# date: 10.11.18
#
# This script performs some basic analysis on vcf files, as output by
# my SNP_detection_pipeline. It has 4 separate run modes:
# 1. get raw mutation counts, for every cell
# 2. get mutation counts, after filtering through COSMIC database
# 3. get mutation counts, ' ',
# with the specific LAUD annotation
# 4. for a given GOI, which cells have mutations, and what are those
# mutations, on the amino acid level? This creates the necessary
# input for all of the lolliplot stuff. As well as for
# makeSummaryTable.ipynb
#
#////////////////////////////////////////////////////////////////////
#////////////////////////////////////////////////////////////////////
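#
# A hedged usage sketch (the exact command-line flags are not shown in this header,
# so the run-mode argument and the example GOI below are assumptions for
# illustration only):
#
#   python getMutationCounts_overall_and_GOI.py <runMode> [<GOI>]
#   e.g. python getMutationCounts_overall_and_GOI.py 3        # COSMIC + LAUD-filtered counts
#        python getMutationCounts_overall_and_GOI.py 4 EGFR   # per-cell amino-acid mutations for a GOI
#
#////////////////////////////////////////////////////////////////////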
import numpy as np
import VCF # comes from <NAME>
import os
import csv
import pandas as pd
import sys
import itertools
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#////////////////////////////////////////////////////////////////////
# getFileNames()
# Get file names based on the specified path
#
#////////////////////////////////////////////////////////////////////
def getFileNames():
files = []
for file in os.listdir("vcf_germline_filter/"):
if file.endswith(".vcf"):
fullPath = (os.path.join("vcf_germline_filter/", file))
files.append(fullPath)
return files
#////////////////////////////////////////////////////////////////////
# getRawCounts()
# Creates dictionary obj with raw counts for GATK hits w/in a given set of vcf files
#
#////////////////////////////////////////////////////////////////////
def getRawCounts(fileNames):
print('getting raw counts...')
cells_dict = {}
for f in fileNames:
cell = f.replace("vcf_germline_filter/", "")
cell = cell.replace(".vcf", "")
df = VCF.dataframe(f)
unique = len(np.unique(df.POS))
cells_dict.update({cell : unique})
print('finished!')
return cells_dict
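#////////////////////////////////////////////////////////////////////
# getRawCountsForAllCells()
# Convenience sketch (not part of the original pipeline) tying the two helpers
# above together; assumes the working directory contains "vcf_germline_filter/"
# as expected by getFileNames()
#
#////////////////////////////////////////////////////////////////////
def getRawCountsForAllCells():
    return getRawCounts(getFileNames())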
#////////////////////////////////////////////////////////////////////
# getGenomePos()
# Returns a genome position string that will match against the ones w/in COSMIC db
#
#////////////////////////////////////////////////////////////////////
def getGenomePos(sample):
try:
chr = sample[0]
chr = chr.replace("chr", "")
pos = int(sample[1])
ref = str(sample[3])
alt = str(sample[4])
if (len(ref) == 1) & (len(alt) == 1): # most basic case
secondPos = pos
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(ref) > 1) & (len(alt) == 1):
secondPos = pos + len(ref)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
elif (len(alt) > 1) & (len(ref) == 1):
secondPos = pos + len(alt)
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
else: # BOTH > 1 .... not sure what to do here. does this actually happen?
secondPos = 'dummy'
genomePos = chr + ':' + str(pos) + '-' + str(secondPos)
except:
genomePos = 'chr0:0-0'
return(genomePos)
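# A hedged illustration of the string format produced above; the row below is a
# made-up VCF record (CHROM, POS, ID, REF, ALT), not taken from any real file:
#   _example_row = ['chr7', 140453136, '.', 'A', 'T']
#   getGenomePos(_example_row)  ->  '7:140453136-140453136'   (simple SNV, so start == end)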
#////////////////////////////////////////////////////////////////////
# getFilterCountsBasic()
# Creates dictionary obj with COSMIC filtered GATK hits w/in a given set of vcfs
#
#////////////////////////////////////////////////////////////////////
def getFilterCountsBasic(fileNames):
print('getting filter counts basic...')
cells_dict_filter = {}
genomePos_db = | pd.Series(database['Mutation genome position']) | pandas.Series |
'''
Created on Dec 14, 2016
Purpose: Given a list of keggKO Results from "Detail Page". Create a map which contains
further information besides protein ID (e.g. HOG membership)
Purpose2: For individual lists containing this secondary information extract all its
genes by id and extract all its annotated genes and pathways
@author: bardya
'''
import os
import subprocess
import csv
import sqlite3
import pandas as pd
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='generate fasta files of all groups, naming scheme: OG<#species>_<groupID>.fa')
parser.add_argument('-omawd', dest='oma_workpath', metavar='<oma_working_directory_path>', required= True,
help='path to the OMA working directory')
parser.add_argument('-omaout', dest='oma_output_dirname', metavar='<oma_output_dirname>', default = "Results",
help='base directory of the OMA output')
parser.add_argument('-m', '--mode', dest='mode', metavar='<hog|og|hogaware>', type=str,
choices=["hog", "hogs", "HOG", "HOGs", "og", "ogs", "OG", "OGS", "HOGAware","HOGAWARE","hogaware","hogAware"],
default="ogs", help='based on selected mode parse the OrthologousGroups.orthoxml or the HierarchicalGroups.orthoxml file located in OMA output directory.')
parser.add_argument('-f', '--force', dest='force_flag', metavar='<force overwrite flag>', action='store_const', const=1, default=0,
help='if set, the output in the directory "./Bins" will be overwritten')
parser.add_argument('--no-stats', dest='stats_flag', metavar='<stats_to_stdout_flag>', action='store_const', const=0, default=1,
help='if set, script does not give out a statistical overview to stdout')
parser.add_argument('--no-accessory', dest='accesory_flag', metavar='<produce accessory genomes flag>', action='store_const', const=1, default=0,
                        help='if set, script gives out the accessory genomes into the directory "./Accessory" relative to omawd')
parser.add_argument('-t', '--speciestree', dest='nwcktree', metavar='<path/to/tree.file>', type=argparse.FileType('rt'),
help='path to a file containing the species tree in string representation')
parser.add_argument('--version', action='version', version='0.1')
return parser.parse_args()
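# A hedged command-line sketch for the parser above (script name and paths are placeholders):
#
#   python run_oma_groups.py -omawd /path/to/OMA_workdir -m hog -t species_tree.nwk -f
#
# Only -omawd is required; -m defaults to "ogs", and -f forces overwriting the "./Bins" output.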
def clearCheckPath(outdir, force=0):
if os.path.isdir(outdir):
if force:
import shutil
shutil.rmtree(outdir, ignore_errors=True)
else:
raise IOError("Output Directory already exiting. Specify '-f' option to force overwrite")
os.makedirs(outdir)
def clearCheckFile(filepath, force=0):
    if os.path.isfile(filepath):
        if force:
            # shutil.rmtree only removes directories; delete the file directly
            os.remove(filepath)
        else:
            raise IOError("Output file already exists. Specify '-f' option to force overwrite")
def createSqlTable(con, filepath, tablename, columnnames, sep='\t', mode='fail', primary_key=()):
'''creates a database table '''
try:
df = pd.read_csv(filepath, sep=sep, names=columnnames, index_col=False)
df.to_sql(tablename, con, if_exists=mode, index=False)
except ValueError as e:
print(tablename + ' already exists in the database.')
except Exception as e:
print('Problem with creation of ' + tablename)
        print('Error Message:\n{}'.format(e))
#if primary_key:
# con.execute('ALTER TABLE {} ADD PRIMARY KEY {};'.format(tablename, tuple(primary_key)))
con.commit()
#def createProtein2Ktable():
# def concatAllKeggResults(listofcsvs, keggjobid2organism):
# '''Concatenate textfile outputs of KEGG Blastkoala analysis to a single large file with structure specied-id \t kegg result line
# '''
# subprocess.call('''for i in *.csv; do id=$(echo $i | sed 's/\.csv$//'); while read line; do echo "$id $line" >> protein2keggK.map; done < $i; done''', shell=True)
#
# subprocess.call('''while read line; do id="$(echo -e "$line" | cut -f1)"; id="$(echo -e "$id" | sed 's/\./__/')"; name="$(echo -e "$line" | cut -f2,3)"; name="$(echo -e "$name" | sed 's/\t/ /')"; sed -i "s/$id/$name/" protein2keggK.map; done < keggoutput2organism.map''', shell=True)
#
# subprocess.call('''cut -f1,2,3,4 protein2keggK.map > protein2keggK_no_seconday.map''', shell=True)
#
#
# def addAdditionalInfo(additional_info_map):
# ''' Adds an additional info column to each line of the protein2keggK_no_seconday.map
# '''
#
# subprocess.call('''for i in HOG*; do ids=$(grep ">" $i | cut -d' ' -f1 | cut -c2-25); while read id; do res=$(grep -m1 "$id" ../../kegg/protein2keggK_no_seconday.map); K=$(echo "$res" | cut -f3); if [[ ! -z "$K" ]]; then categ=$(grep -m1 $K /share/project/bardya/dbs/kegg/keggK2pathway.map); else categ=$(echo -e '\t\t\t'); fi; echo -e "$i"'\t'"$id"'\t'"$categ"; done < "$ids"; done > results_kegg_all.map''', shell=True)
#
#
# def OrganismProtein2HOGmap():
# suprocess.call('''for i in *.fa; do headers=$(grep ">" $i); while read -r line; do id=$(echo "$line" | cut -d ' ' -f1 | cut -c2-50 | tr -d '[:space:]'); organism=$(echo "$line" | rev | cut -d' ' -f1 | rev | tr -d '\[\]' | tr -d '[:space:]'); echo "$organism $id $i"; done <<< "$headers"; done > OrganismProtein2HOG.map''')
#
if __name__ == '__main__':
KEGGKO2PATHWAY_MAP = '/share/project/bardya/dbs/kegg/keggK2pathway.map'
#created with kegg_hierarch2map.py --> Each line contains tab separated
#KO Gene name/ description/ EC Pathway id + Pathway name Category
#K00844 HK; hexokinase [EC:2.7.1.1] 01200 Carbon metabolism [PATH:ko01200] Metabolism
PROTEIN2KEGG_MAP = '/share/project/bardya/Enterobacteriaceae/kegg/protein2keggK0.map'
#-->Each line contains tab separated information on
#OrganismID ProteinID KEGG Results (tab separated detail-output of BlastKoala)
#GCF_000736695__1_XBKBD_3526_PRJEB4325_protein WP_002211347.1 K02518 infA; translation initiation factor IF-1 73
ORGANISMPROTEIN2HOG_MAP = '/share/project/bardya/Enterobacteriaceae/OMA_prot/Results/HOGFasta/OrganismProtein2HOG.map2'
#created with #suprocess.call('''for i in *.fa; do headers=$(grep ">" $i); while read -r line; do id=$(echo "$line" | cut -d ' ' -f1 | cut -c2-50 | tr -d '[:space:]'); organism=$(echo "$line" | rev | cut -d' ' -f1 | rev | tr -d '\[\]' | tr -d '[:space:]'); echo "$organism $id $i"; done <<< "$headers"; done > OrganismProtein2HOG.map''')
#-->Each line contains tab separated information on
#OrganismID ProteinID HOG_ID
#GCF_000252955__1_ASM25295v1_protein WP_041573901.1 HOG10000.fa
LOSSGAINHOG2Level = '/share/project/bardya/Enterobacteriaceae/OMA_prot/level2hoglossgain.map'
#created with cd HOGLevel_Gains && for i in *; do while read line; do echo "$i $line"; done < $i >> ../LOSSMAP; done && > sed -i 's/^/G\t/' ../GAINMAP
#cd HOGLevel_Losses && for i in *; do while read line; do echo "$i $line"; done < $i >> ../LOSSMAP; done && > sed -i 's/^/L\t/' ../LOSSMAP
#cd .. && cat GAINMAP LOSSMAP > level2hoglossgain.map
#con = sqlite3.connect(":memory:")
con = sqlite3.connect("mykeggdb")
cur = con.cursor()
createSqlTable(con, KEGGKO2PATHWAY_MAP, 'keggK2pathway',
['KO', 'Description', 'Pathway', 'Category'])
createSqlTable(con, PROTEIN2KEGG_MAP, 'OrganismProtein2KeggResults',
['Organism', 'ProteinID', 'KO_primary', 'Description', 'SimScore_primary', 'KO_secondary', 'SimScore_secondary' ], mode='append')
createSqlTable(con, ORGANISMPROTEIN2HOG_MAP, 'OrganismProtein2HOG',
['Organism', 'ProteinID', 'HOG' ])
createSqlTable(con, LOSSGAINHOG2Level, 'lossgainhog2level',
['Event', 'Level', 'HOG' ])
con.execute('''CREATE TABLE IF NOT EXISTS OrgProHog2Keggres AS
SELECT OrganismProtein2KeggResults.Organism,
OrganismProtein2KeggResults.ProteinID,
OrganismProtein2HOG.HOG,
OrganismProtein2KeggResults.KO_primary,
OrganismProtein2KeggResults.Description,
OrganismProtein2KeggResults.SimScore_primary,
OrganismProtein2KeggResults.KO_secondary,
OrganismProtein2KeggResults.SimScore_secondary
FROM OrganismProtein2KeggResults LEFT OUTER JOIN OrganismProtein2HOG ON
((OrganismProtein2KeggResults.Organism = OrganismProtein2HOG.Organism) AND
(OrganismProtein2KeggResults.ProteinID = OrganismProtein2HOG.ProteinID) );
''')
con.commit()
sql = '''CREATE TABLE IF NOT EXISTS HogKo2Pathway AS
SELECT OrgProHog2Keggres.HOG, keggK2pathway.KO, keggK2pathway.Pathway, keggK2pathway.Category
FROM OrgProHog2Keggres, keggK2Pathway
WHERE
OrgProHog2Keggres.KO_primary = keggK2pathway.KO;'''
con.execute(sql)
sql = '''CREATE TABLE IF NOT EXISTS gainsAtlevel AS
SELECT * FROM HogKo2Pathway LEFT OUTER JOIN lossgainhog2level ON (HogKo2Pathway.HOG = lossgainhog2level.HOG) WHERE lossgainhog2level.Event = 'G';'''
con.execute(sql)
sql = '''CREATE TABLE IF NOT EXISTS lossesAtlevel AS
SELECT * FROM HogKo2Pathway LEFT OUTER JOIN lossgainhog2level ON (HogKo2Pathway.HOG = lossgainhog2level.HOG) WHERE lossgainhog2level.Event = 'L';'''
con.execute(sql)
# for i in range(77):
# sql = 'select HOG, KO, Pathway, Category from gainsatlevel where Level={};'.format(i)
# df = pd.read_sql(sql, con)
# df.to_csv('/share/project/bardya/Enterobacteriaceae/kegg/only_relevant/kegg_HOGLevel_Gains/{}'.format(i), sep='\t', header=False, index=False)
#
# for i in range(77):
# sql = 'select HOG, KO, Pathway, Category from lossesatlevel where Level={};'.format(i)
# df = pd.read_sql(sql, con)
# df.to_csv('/share/project/bardya/Enterobacteriaceae/kegg/only_relevant/kegg_HOGLevel_Losses/{}'.format(i), sep='\t', header=False, index=False)
#
hogrange = [0,1,2]
losses = ()
df = | pd.DataFrame() | pandas.DataFrame |
import pandas as pd
import numpy as np
import sqlite3
from sklearn.linear_model import Ridge
from sklearn import metrics
from scipy.optimize import curve_fit
conn = sqlite3.connect('project.db')
conn.text_factory = lambda x: str(x, 'iso-8859-1')
cur = conn.cursor()
### Total freshwater Withdrawal
get_TFW = """
SELECT Year, AreaId, Area, Value from parameter
WHERE VariableId = 4263
ORDER BY AreaId ASC
"""
get_area_exclude = """
SELECT AreaId from
(
SELECT Year, AreaId, Value, Area, count(Year) as count from parameter
where VariableId=4263
GROUP By AreaId
)
Where count <= 1
"""
df_TFW = pd.read_sql(sql=get_TFW, con=conn)
df_area_exclude = pd.read_sql(sql=get_area_exclude, con=conn)
list_area_exclude = df_area_exclude['AreaId'].values.tolist()
df_TFW = df_TFW[~df_TFW['AreaId'].isin(list_area_exclude)]
# Year where TFW values are estimated
# from 1980 to 2015 withh 5-year gap
Year = list(range(1980, 2016, 5))
Year = np.asarray(Year).reshape(-1, 1)
# Obtain Unique AreaId
unique_areaId = df_TFW.AreaId.unique().tolist()
Ridge_model_dict = {key: None for key in unique_areaId}
## Imputation
# Define the prediction-year vector once so every branch below (including the sigmoid fits) can use it
year_array = Year.reshape(len(Year),)
for i in unique_areaId:
if (i == 169): # Paraguay
tt = df_TFW.loc[df_TFW['AreaId'] == i]
tt_x = tt['Year'].values
tt_y = tt['Value'].values
b, a = np.polyfit(tt_x, np.log(tt_y), 1)
year_array = Year.reshape(len(Year),)
y = 5E-60 * np.exp(b * year_array)
Ridge_model_dict[i]={'pred': y.tolist()}
elif (i == 175): # Guinea-Bissau or Somalia
tt = df_TFW.loc[df_TFW['AreaId'] == i]
tt_x = tt['Year'].values
tt_y = tt['Value'].values
# Define sigmoid function
def sigmoid(x, a, b):
y = 0.18 / (1 + np.exp(-a * (x - b))) + 0.01
return y
popt, pcov = curve_fit(sigmoid, tt_x, tt_y, p0=[0.001778, 1993.5])
y = sigmoid(year_array, *popt)
Ridge_model_dict[i] = {'pred': y.tolist()}
elif (i == 201):
tt = df_TFW.loc[df_TFW['AreaId'] == i]
tt_x = tt['Year'].values
tt_y = tt['Value'].values
# Define sigmoid function
def sigmoid(x, a, b):
y = 2.7 / (1 + np.exp(-a * (x - b))) + 0.7
return y
popt, pcov = curve_fit(sigmoid, tt_x, tt_y, p0=[0.58, 1992])
y = sigmoid(year_array, *popt)
Ridge_model_dict[i] = {'pred': y.tolist()}
else:
temp_df = df_TFW.loc[df_TFW['AreaId'] == i]
temp_x = temp_df['Year'].values.reshape(-1, 1)
temp_y = temp_df['Value'].values.reshape(-1, 1)
# Detect outliers and remove it
test_Q1 = np.quantile(temp_y, 0.25)
test_Q3 = np.quantile(temp_y, 0.75)
IQR = test_Q3 - test_Q1
exist_outlier = [str(i[0]) for i in (temp_y < (test_Q1 - 1.5 * IQR)) | (temp_y > (test_Q3 + 1.5 * IQR))]
if 'True' in exist_outlier:
temp_x = temp_x[~((temp_y < (test_Q1 - 1.5 * IQR)) | (temp_y > (test_Q3 + 1.5 * IQR)))].reshape(-1, 1)
temp_y = temp_y[~((temp_y < (test_Q1 - 1.5 * IQR)) | (temp_y > (test_Q3 + 1.5 * IQR)))].reshape(-1, 1)
elif (i == 48): #<NAME>
temp_x = np.insert(temp_x, 0, 2001).reshape(-1,1)
temp_y = np.insert(temp_y, 0, 2.2).reshape(-1,1)
# Build a Ridge Regressor
temp_Ridge = Ridge(alpha=0.9, normalize=True)
temp_Ridge.fit(temp_x, temp_y)
temp_y_pred = temp_Ridge.predict(temp_x)
Ridge_model_dict[i] = {'m': temp_Ridge.coef_[0], 'b': temp_Ridge.intercept_,
'r2': metrics.r2_score(temp_y, temp_y_pred)}
Ridge_model_dict[i].update({'pred': temp_Ridge.predict(Year).reshape(len(Year), ).tolist()})
df_Ridge_model = pd.DataFrame.from_dict(Ridge_model_dict, orient='index')
df_Ridge_model.index.names = ['AreaId']
df_Ridge_predicted = df_Ridge_model[['pred']]
df_Ridge_predicted = df_Ridge_predicted.pred.apply(pd.Series)
df_Ridge_predicted['AreaId'] = df_Ridge_predicted.index
df_Ridge_predicted.rename(columns={key: value[0] for key, value in zip(range(len(Year)), Year)}, inplace=True)
################### Create year bin
df_yr_base = pd.DataFrame(data=np.arange(1978, 2018, 1),
index=np.arange(0, 2018 - 1978, 1),
columns=['Year'])
df_yr_base['bucket'] = np.nan
count = 0
x = 1
for i in range(df_yr_base.shape[0]):
df_yr_base['bucket'][i] = 'bucket' + str(x)
count += 1
if count == 5:
x += 1
count = 0
df_base = pd.merge(left=df_TFW[['Area', 'AreaId']].drop_duplicates().assign(foo=1),
right=df_yr_base.assign(foo=1),
left_on='foo',
right_on='foo')
df_base.drop(columns=['foo'], inplace=True)
df_TFW_merged = pd.merge(left=df_base,
right=df_TFW,
left_on=['Area', 'AreaId', 'Year'],
right_on=['Area', 'AreaId', 'Year'],
how='left')
df_TFW_merged_bucket_avg = df_TFW_merged.groupby(['Area', 'bucket'], as_index=False).mean()
df_TFW_merged_bucket_avg.drop(columns='bucket', inplace=True)
df_Ridge_pred_melt = pd.melt(df_Ridge_predicted, id_vars=['AreaId'],
value_vars=[1980, 1985, 1990, 1995, 2000, 2005, 2010, 2015])
df_Ridge_pred_melt.columns = ['AreaId', 'Year', 'pred_val']
df_TFW_comb = pd.merge(left=df_TFW_merged_bucket_avg, right=df_Ridge_pred_melt,
left_on=['AreaId', 'Year'], right_on=['AreaId', 'Year'], how='left')
df_TFW_comb.loc[pd.isnull(df_TFW_comb['Value']), 'Value'] = df_TFW_comb.loc[ | pd.isnull(df_TFW_comb['Value']) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 12:42:36 2020
@author: User
"""
# import sys
from pathlib import Path
# from collections import namedtuple
# from datetime import datetime
import numpy as np
# import itertools
from typing import Dict, List
from cmath import phase
from dataclasses import dataclass, field
import os
import multiprocessing
# from functools import partial
from itertools import repeat
import pandas as pd
from file_py_helper.find_folders import FindExpFolder
from file_py_helper.file_functions import FileOperations
from EIS.fitting import Fit_Spectrum, Fit_Spectra_Collection
from EIS.models import Model_Collection
# prepare_rand_models
from EC_DataLoader.CreateCV import create_CVs as create_CVs
# from EIS.plotting import (
# EIS_plotting_per_EV,
# EIS_Trimming_plot,
# EIS_plotting_EvRHE,
# )
import logging
_logger = logging.getLogger(__name__)
globals()["EvRHE"] = "E_AppV_RHE"
# print('TODO: eis run fix MULTIPROCESSING!')
# TODO: do it
# Meta = namedtuple('Meta', 'PAR_file Segment E_dc_RHE E_dc_RHE_mV RPM_DAC data ovv')
@dataclass(order=True, frozen=False)
class EIS_Spectrum:
"""EIS Spectrum dataclass.\n
Holds spectrum raw data in pd.DataFrame and metadata"""
_required_cols = ["Frequency(Hz)", "Z Real", "Z Imag"]
_spectrum_grp_cols = ["PAR_file", "Segment #", EvRHE, "RPM_DAC"]
PAR_file: Path = field(default=Path(Path.cwd().joinpath("empty.txt")))
Segment: int = 0
E_dc_RHE: float = 0.0
RPM_DAC: float = 0.0
data: type(pd.DataFrame) = field(default=pd.DataFrame(), repr=False)
ovv: type(pd.DataFrame) = field(default=pd.DataFrame(), repr=False)
EIS_kwargs: Dict = field(default_factory=dict, repr=False)
def __post_init__(self):
# self.E_dc_RHE = np.round(self.E_dc_RHE, 3)
self.E_dc_RHE_mV = np.round(self.E_dc_RHE * 1e3, 3)
self.EvRHE = self.E_dc_RHE
self.add_complex_impedance_columns()
self.check_freqlim()
def __repr__(self):
_file = f'File: "{self.PAR_file}"'
_data = f"at {self.E_dc_RHE_mV} mV with {self.RPM_DAC} rpm, data({len(self.data)}) and ovv({len(self.ovv)})"
_keys = f"attrs: {self.E_dc_RHE_mV} mV with {self.RPM_DAC} rpm, data({len(self.data)}) and ovv({len(self.ovv)})"
return _file + "\n" + _data + "\n" + _keys
def add_complex_impedance_columns(self):
# ['Frequency(Hz)'
_check_cols = []
if hasattr(self.data, "columns"):
_check_cols = [i in self.data.columns for i in self._required_cols]
EIS_data_raw = pd.DataFrame()
if all(_check_cols):
_add_cols = {}
freq = self.data["Frequency(Hz)"].values
_add_cols.update(
{
"Frequency(Hz)": freq,
"Angular": freq * 2 * np.pi,
"Ang_Warburg": 1 / (np.sqrt(freq * 2 * np.pi)),
}
)
Zre, Zim = self.data["Z Real"].values, self.data["Z Imag"].values
Zdata = Zre + 1j * Zim
Ydata = Zdata ** -1
Yre, Yim = Ydata.real, Ydata.imag
# DataWeights_modulus_Z = np.sqrt((Zre**2+Zim**2))
# 'lmfit_weights_mod_Z' : DataWeights_modulus_Z
DataWeights_modulus_Y = np.sqrt((Zre ** 2 + Zim ** 2)) ** -1
_add_cols.update(
{
"DATA_Z": Zdata,
"DATA_Zre": Zre,
"DATA_Zim": Zim,
"DATA_-Zim": -1 * Zim,
"DATA_Zphase": [phase(i) for i in Zdata],
"DATA_Zmod": abs(Zdata),
"DATA_Zangle": np.angle(Zdata, deg=True),
"DATA_-Zangle": -1 * np.angle(Zdata, deg=True),
"DATA_Y": Ydata,
"DATA_Yre": Yre,
"DATA_Yim": Yim,
"Valid": True,
"lmfit_weights_mod_Y": DataWeights_modulus_Y,
"lmfit_weights_unit": Zre / Zre,
"lmfit_weights_prop": Ydata,
}
)
_meta_info_cols = {
key: val
for key, val in vars(self).items()
if any([i in type(val).__name__ for i in ["int", "float", "Path"]])
}
_add_cols.update(_meta_info_cols)
EIS_data_raw = pd.DataFrame(_add_cols, index=self.data.index)
EIS_data_raw = EIS_data_raw.sort_values(by="Frequency(Hz)", ascending=True)
elif self.data.empty:
if not "empty" in self.PAR_file.name:
_logger.error(f"Error in EIS_spectrum, empty data for {self}")
else:
            _logger.error(
                "Error in EIS_spectrum, missing columns: "
                + ", ".join(
                    [i for i in self._required_cols if i not in self.data.columns]
                )
            )
raise ValueError
self.EIS_data = EIS_data_raw
def check_freqlim(self):
if not self.EIS_data.empty:
FreqLim = self.EIS_kwargs.get("FreqLim", 30e3)
self.EIS_data.loc[
(
(self.EIS_data["Frequency(Hz)"] >= FreqLim)
| (self.EIS_data["DATA_Zre"] < 0)
| (np.abs(self.EIS_data["DATA_Zre"]) > 1e4)
| (np.abs(self.EIS_data["DATA_Zim"]) > 1e4)
),
"Valid",
] = False # TODO
# & (self.EIS_data['DATA_-Zim'] < -15
self.EIS_data_freqlim = self.EIS_data.query("Valid == True")
def EIS_exp_fit_cols(self):
_check = ["PAR_file", "Segment", EvRHE, "RPM_DAC", "Model_EEC"]
return list(self.__dataclass_fields__.keys())[:-3] + ["Model_EEC"]
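# A hedged construction sketch for EIS_Spectrum (path and values below are hypothetical);
# the raw `data` frame must carry the _required_cols 'Frequency(Hz)', 'Z Real' and 'Z Imag'
# so that __post_init__ can derive the complex-impedance columns.
#
#   spec = EIS_Spectrum(
#       PAR_file=Path('example_EIS.par'), Segment=1, E_dc_RHE=0.65, RPM_DAC=1500.0,
#       data=raw_df, ovv=ovv_df, EIS_kwargs={'FreqLim': 30e3},
#   )
#   spec.EIS_data_freqlim  # only the rows still marked Valid after the frequency/impedance cuts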
@dataclass(order=True)
class EIS_spectra_collection:
global EvRHE
PAR_file: Path = field(default=Path)
spectra: List[EIS_Spectrum] = field(default=list)
data: type(pd.DataFrame) = field(default=pd.DataFrame(), repr=False)
ovv: type(pd.DataFrame) = field(default=pd.DataFrame(), repr=False)
EIS_kwargs: Dict = field(default_factory=dict, repr=False)
def __post_init__(self):
self.check_if_list_contains_EIS_spectra()
self.make_mean_spectrum()
def check_if_list_contains_EIS_spectra(self):
_check = [isinstance(i, EIS_Spectrum) for i in self.spectra]
if not all(_check) and self.spectra:
raise ValueError
def make_mean_spectrum(self):
_new_PF_mean = self.PAR_file.with_name(
self.PAR_file.stem + "_fakeZmean" + self.PAR_file.suffix
)
_PF_mean_ovv = self.ovv.loc[self.ovv.PAR_file == self.PAR_file]
# _PF_mean_ovv['Measured_OCP'] = [i[0] for i in _PF_mean_ovv['_act0_Measured Open Circuit'].str.split()]
# _PF_mean_ovv['PAR_file'] = _new_PF_mean
_PF_mean_ovv = _PF_mean_ovv.assign(**{"PAR_file": _new_PF_mean})
_mean_E_selection = [
i
for i in self.spectra
if i.E_dc_RHE > 0.4 and i.E_dc_RHE < 0.85 and "O2" in self.PAR_file.name
]
if _mean_E_selection:
_PF_data = pd.concat(i.data for i in _mean_E_selection)
else:
_PF_data = | pd.concat(i.data for i in self.spectra) | pandas.concat |
import xlwings as xw
import math
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import sympy.physics.units as u
from pint import UnitRegistry
from functools import reduce
import xml.etree.ElementTree as ET
import requests
import pathlib
# import pandas as pd
from hidroweb_downloader.download_from_api_BATCH import Hidroweb_BatchDownload
u = UnitRegistry()
# wb = xw.Book()
@xw.sub # only required if you want to import it or run it via UDF Server
def main():
wb = xw.Book.caller()
wb.sheets[0].range("A1").value = "Hello xlwings!"
# @xw.func
# def testando():
# return 0
@xw.func
def download_HidrowebInventario():
api_inventario = 'http://telemetriaws1.ana.gov.br/ServiceANA.asmx/HidroInventario'
params = {'codEstDE':'','codEstATE':'','tpEst':'','nmEst':'','nmRio':'','codSubBacia':'',
'codBacia':'','nmMunicipio':'','nmEstado':'','sgResp':'','sgOper':'','telemetrica':''}
response = requests.get(api_inventario, params)
tree = ET.ElementTree(ET.fromstring(response.content))
root = tree.getroot()
data = {'BaciaCodigo':[],'SubBaciaCodigo':[],'RioCodigo':[],'RioNome':[],'EstadoCodigo':[],
'nmEstado':[],'MunicipioCodigo':[],'nmMunicipio':[],'ResponsavelCodigo':[],
'ResponsavelSigla':[],'ResponsavelUnidade':[],'ResponsavelJurisdicao':[],
'OperadoraCodigo':[],'OperadoraSigla':[],'OperadoraUnidade':[],'OperadoraSubUnidade':[],
'TipoEstacao':[],'Codigo':[],'Nome':[],'CodigoAdicional':[],'Latitude':[],'Longitude':[],
'Altitude':[],'AreaDrenagem':[],'TipoEstacaoEscala':[],'TipoEstacaoRegistradorNivel':[],
'TipoEstacaoDescLiquida':[],'TipoEstacaoSedimentos':[],'TipoEstacaoQualAgua':[],
'TipoEstacaoPluviometro':[],'TipoEstacaoRegistradorChuva':[],'TipoEstacaoTanqueEvapo':[],
'TipoEstacaoClimatologica':[],'TipoEstacaoPiezometria':[],'TipoEstacaoTelemetrica':[],'PeriodoEscalaInicio':[],'PeriodoEscalaFim':[] ,
'PeriodoRegistradorNivelInicio' :[],'PeriodoRegistradorNivelFim' :[],'PeriodoDescLiquidaInicio' :[],'PeriodoDescLiquidaFim':[] ,'PeriodoSedimentosInicio' :[],
'PeriodoSedimentosFim':[] ,'PeriodoQualAguaInicio':[] ,'PeriodoQualAguaFim' :[],'PeriodoPluviometroInicio':[] ,'PeriodoPluviometroFim':[] ,
'PeriodoRegistradorChuvaInicio' :[],'PeriodoRegistradorChuvaFim' :[],'PeriodoTanqueEvapoInicio':[] ,'PeriodoTanqueEvapoFim':[] ,'PeriodoClimatologicaInicio' :[],'PeriodoClimatologicaFim':[] ,
'PeriodoPiezometriaInicio':[] ,'PeriodoPiezometriaFim' :[],'PeriodoTelemetricaInicio' :[],'PeriodoTelemetricaFim' :[],
'TipoRedeBasica' :[],'TipoRedeEnergetica' :[],'TipoRedeNavegacao' :[],'TipoRedeCursoDagua' :[],
'TipoRedeEstrategica':[] ,'TipoRedeCaptacao':[] ,'TipoRedeSedimentos':[] ,'TipoRedeQualAgua':[] ,
'TipoRedeClasseVazao':[] ,'UltimaAtualizacao':[] ,'Operando':[] ,'Descricao':[] ,'NumImagens':[] ,'DataIns':[] ,'DataAlt':[]}
# print(root.tag)
for i in root.iter('Table'):
for j in data.keys():
d = i.find('{}'.format(j)).text
if j == 'Codigo':
data['{}'.format(j)].append('{:08}'.format(int(d)))
else:
data['{}'.format(j)].append(d)
print(len(list(root.iter('Table'))))
# print(data)
df = pd.DataFrame(data)
cwd = pathlib.Path(__file__).parent.absolute()/'Hidroweb_Inventario'
cwd.mkdir(parents=True, exist_ok=True)
df.to_csv(cwd/'Inventario.csv')
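# Note: calling download_HidrowebInventario() writes Hidroweb_Inventario/Inventario.csv
# next to this module; find_Code below, however, currently reads the inventory from a
# hard-coded absolute path, so the two locations may need to be kept in sync.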
@xw.func
def find_Code(estado, min_areaDrenagem, max_areaDrenagem):
hidrowebInventario_path = pathlib.Path(r'C:\Users\User\git\Excel-python\test\Hidroweb_Inventario\Inventario.csv')
inventario = pd.read_csv(hidrowebInventario_path)
df = inventario.loc[(inventario['nmEstado']==estado)&
(inventario['AreaDrenagem']>=min_areaDrenagem)&
(inventario['AreaDrenagem']<=max_areaDrenagem),
['Codigo', 'AreaDrenagem']]
a = []
for i, row in df.iterrows():
download_HidrowebStation(estado=estado, min_areaDrenagem=min_areaDrenagem, max_areaDrenagem=max_areaDrenagem,
codigo=row['Codigo'])
return df
@xw.func
def download_HidrowebStation(estado, min_areaDrenagem, max_areaDrenagem, codigo):
cwd = pathlib.Path(__file__).parent.absolute()/f'Hidroweb_Stations_min{min_areaDrenagem}_max{max_areaDrenagem}'
cwd.mkdir(parents=True, exist_ok=True)
d = Hidroweb_BatchDownload()
a = d.download_ANA_stations(station=int(codigo), typeData=3, folder_toDownload=cwd)
return 0
def merge_HidrowebStation(estado, min_areaDrenagem, max_areaDrenagem):
cwd = pathlib.Path(__file__).parent.absolute()/f'Hidroweb_Stations_min{min_areaDrenagem}_max{max_areaDrenagem}'
dfs = []
for data in cwd.rglob('3*.csv'):
df = | pd.read_csv(data, parse_dates=['Date']) | pandas.read_csv |
import pandas as pd
import random
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from datasets import load_data, random_benchmark, list_datasets
from sklearn.preprocessing import RobustScaler
from tensorflow.keras.preprocessing.sequence import pad_sequences
def flatten_ts(train, test):
new_train, new_test = [], []
for _, row in train.iterrows():
new_list = []
for i in row.index:
row[i] = row[i].dropna()
for j in range(len(row[i])):
new_list.append(row[i][j])
new_train.append(new_list)
for _, row in test.iterrows():
new_list = []
for i in row.index:
row[i] = row[i].dropna()
for j in range(len(row[i])):
new_list.append(row[i][j])
new_test.append(new_list)
train_df = pd.DataFrame(new_train)
test_df = pd.DataFrame(pad_sequences(new_test, maxlen=train_df.shape[1], dtype='float32'))
scaler = RobustScaler()
scaler.fit(train_df)
return scaler.transform(train_df.dropna()), scaler.transform(test_df.dropna())
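# Illustrative usage (a sketch, assuming sktime-style nested frames where each cell
# holds a pd.Series for one channel; the exact return signature of load_data above
# is an assumption):
#   X_train, y_train, X_test, y_test = load_data("BasicMotions")
#   train_flat, test_flat = flatten_ts(X_train, X_test)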
def remove_and_impute(train_data, test_data, missing_rate, method='mean'):
train, test = flatten_ts(train_data, test_data)
new_train = | pd.DataFrame(train) | pandas.DataFrame |
import pandas as pd
import numpy as np
from . import raw_TCGA_path, processed_TCGA_path
from .dataset import Dataset
raw_genexp_path = raw_TCGA_path / "genexp"
def load_raw_genexp_cohort(cohort_name):
df = pd.read_table(raw_genexp_path / (cohort_name.upper() + ".gz"), index_col=0).T
df.columns.name = "gene"
columns_sorted = sorted(df.columns)
df = df[columns_sorted]
df.index.name = "sample"
df = df.sort_index()
return df
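# Illustrative usage ("brca" is an assumed cohort name; any cohort with a matching
# <COHORT>.gz table under raw_genexp_path behaves the same):
#   genexp = load_raw_genexp_cohort("brca")   # samples x genes, both axes sorted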
def __load_index(index_path):
return | pd.read_table(index_path, index_col=0, squeeze=True) | pandas.read_table |
from datetime import datetime
from dateutil import tz
import glob
import os
import pandas as pd
import sys
import time
def format_source():
"""
"""
print("running format_source")
sensors = ['EDA', 'HR', 'TEMP']
path_folders = os.path.join('..', '..', 'source_measurements', 'PMR', 'ref' )
save_file = os.path.join(path_folders, 'source_list_02' + '.csv' )
df = | pd.read_csv(save_file) | pandas.read_csv |
import numpy as np
import pandas as pd
import re
import string
import nltk
from nltk import word_tokenize
nltk.download('stopwords')
### Load data from csv -------------------------------
print("Loading data from csv...")
train_data = pd.read_csv("train.csv") # Data set used to train.
test_data = pd.read_csv("dev.csv") # Data set used to set.
train_data = train_data[train_data.label!=0]
test_data = test_data[test_data.label!=0]
# Add a new column called 'edited' as the edited headline.
train_data["edited"] = train_data.apply(
lambda x: re.sub(r"<.+/>", x["edit"], x["original"]), axis=1
)
test_data["edited"] = test_data.apply(
lambda x: re.sub(r"<.+/>", x["edit"], x["original"]), axis=1
)
train_data["original"] = train_data["original"].str.replace(r"<(.+)/>", "\g<1>")
test_data["original"] = test_data["original"].str.replace(r"<(.+)/>", "\g<1>")
# Remove stop words
from nltk.corpus import stopwords
stop_words = set(stopwords.words("english"))
punctuations = string.punctuation
# Split the train data into train set and test set
### Pre-processing -----------------------------------
print("Loading word2vec model...")
# Using word2vec trained by Google (We don't need to train embedding model ourself)
# https://code.google.com/archive/p/word2vec/
import gensim.downloader as downloader
# Download the *pretrained* word2vec model.
# This size of the model is 1662.8MB so it might take a while.
word2vec = downloader.load("word2vec-google-news-300")
def tokenize(text):
tokens = word_tokenize(text.lower())
# Punctuation Removal
tokens = [one for one in tokens if one not in stop_words]
# Stop words removal
tokens = [one for one in tokens if one not in punctuations]
return tokens
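# Example (illustrative): tokenize("Scientists discover new planet!") returns
# ['scientists', 'discover', 'new', 'planet'] -- lower-cased, with stop words and
# punctuation removed.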
def convert2vector(headlines):
"""
Convert a list of headlines to a list of vectors.
"""
corpus = [tokenize(one) for one in headlines]
vectors = []
for tokenized_headline in corpus:
# Only consider words that's in the word2vec model.
tokenized_headline = [one for one in tokenized_headline if one in word2vec.vocab]
# The vector for each headline is the mean value of all vectors for words.
vector = np.mean(word2vec[tokenized_headline], axis=0)
vectors.append(vector)
return np.array(vectors)
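# Quick sanity check (hypothetical headline; 300 is the dimension of the
# word2vec-google-news-300 embeddings used above):
#   convert2vector(["Scientists discover new planet"]).shape   # -> (1, 300)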
print("Tokenizing words...")
x_train_original = convert2vector(train_data["original"])
x_train_edited = convert2vector(train_data["edited"])
# Combine original headlines with edited headlines
# [[...Original, ...Edited], [...Original, ...Edited], [...Original, ...Edited], ...]
x_train = np.array([
np.concatenate((x_train_original[i], x_train_edited[i]), axis=None) for i in range(len(x_train_original))
])
# x_train_all = x_train_all.reshape(x_train_all.shape[0], 600, 1)
y_train = train_data["meanGrade"]
y_train = y_train.to_numpy().reshape(y_train.shape[0], 1)
x_test_original = convert2vector(test_data["original"])
x_test_edited = convert2vector(test_data["edited"])
# Combine original headlines with edited headlines
# [[...Original, ...Edited], [...Original, ...Edited], [...Original, ...Edited], ...]
x_test = np.array([
np.concatenate((x_test_original[i], x_test_edited[i]), axis=None) for i in range(len(x_test_original))
])
# x_train_all = x_train_all.reshape(x_train_all.shape[0], 600, 1)
y_test = test_data["meanGrade"]
y_test = y_test.to_numpy().reshape(y_test.shape[0], 1)
# Prepare for CNN
x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
### Train the model ---------------------------------------
import tensorflow as tf
from tensorflow import keras
# Split the training set into train set and test set
# TRAIN_RATIO = 0.90 # Let 90% of the data used for training
#
# x_train, x_test = tf.split(x_train_all, [int(x_train_all.shape[0] * TRAIN_RATIO), x_train_all.shape[0] - int(x_train_all.shape[0] * TRAIN_RATIO)])
# y_train, y_test = tf.split(y_train_all, [int(x_train_all.shape[0] * TRAIN_RATIO), x_train_all.shape[0] - int(x_train_all.shape[0] * TRAIN_RATIO)])
# Parameters
LEARNING_RATE = 0.001
TRAINING_EPOCHS = 15
BATCH_SIZE = 100
# Building a FFNN network, Linear Regression
model = keras.Sequential([
keras.layers.Conv1D(10, 20, activation='relu', input_shape=x_train.shape[1:]),
keras.layers.Reshape(target_shape=[10 * 581]),
keras.layers.Dense(64, activation='softmax'),
keras.layers.Dropout(0.2),
keras.layers.Dense(1) # Output layer
])
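# Note on the Reshape size above: a 600-length input convolved with kernel size 20
# (stride 1, no padding) gives 600 - 20 + 1 = 581 positions per filter, so the 10
# filters flatten to a 10 * 581 feature vector.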
optimizer = tf.keras.optimizers.RMSprop(learning_rate=LEARNING_RATE)
# Using mean square error as loss function
model.compile(loss=keras.losses.mean_squared_error, optimizer=optimizer, metrics=['mse'])
model.summary()
history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=TRAINING_EPOCHS)
#######
model.evaluate(x_test, y_test, verbose=2)
# Testing the rmse of predicting result
def rmse(pred, target):
    return np.sqrt(np.mean((pred - target) ** 2))
def roundToOneDecimal(value):
value = value.reshape(value.shape[0])
roundedValue = np.array([round(one, 1) for one in value])
return roundedValue.reshape([value.shape[0], 1])
y_pred = model.predict(x_test)
# y_pred = roundToOneDecimal(y_pred)
print("RMSE: {}".format(rmse(y_pred, y_test)))
### Predict the final result -------------------------
# test.csv doesn't have labels. This file will be used for grading.
grading_data = pd.read_csv("test.csv")
grading_data["edited"] = grading_data.apply(
lambda x: re.sub(r"<.+/>", x["edit"], x["original"]), axis=1
)
grading_data["original"] = train_data["original"].str.replace(r"<(.+)/>", "\g<1>")
x_grading_original = convert2vector(grading_data["original"])
x_grading_edited = convert2vector(grading_data["edited"])
x_grading = np.array([
np.concatenate((x_grading_original[i], x_grading_edited[i]), axis=None) for i in range(len(x_grading_original))
])
x_grading = x_grading.reshape((x_grading.shape[0], x_grading.shape[1], 1))
y_grading = model.predict(x_grading)
# y_grading = roundToOneDecimal(y_grading)
y_grading = y_grading.reshape([x_grading.shape[0]])
result = | pd.DataFrame({"id": grading_data["id"], "pred": y_grading}) | pandas.DataFrame |
import matplotlib
matplotlib.use('Agg')
#from matplotlib import rcParams
#rcParams['text.latex.preamble'] = r'\newcommand{\mathdefault}[1][]{}'
import matplotlib.pyplot as plt
import os
import re
import json
import shutil
import pickle
import optparse
import itertools
import numpy as np
import scipy as sp
import pandas as pd
from corner import corner
from datetime import datetime
from bilby import result as br
from chainconsumer import ChainConsumer
from dateutil.parser import parse as pdate
from enterprise_extensions.frequentist.optimal_statistic import \
OptimalStatistic as OptStat
from enterprise.signals import signal_base
from . import enterprise_warp
def parse_commandline():
"""
Parsing command line arguments for action on results
"""
parser = optparse.OptionParser()
parser.add_option("-r", "--result", help="Output directory or a parameter \
file. In case of individual pulsar analysis, specify a \
directory that contains subdirectories with individual \
pulsar results. In case of an array analysis, specify a \
directory with result files.", \
default=None, type=str)
parser.add_option("-i", "--info", help="Print information about all results. \
In case \"-n\" is specified, print an information about \
results for a specific pulsar.", \
default=0, type=int)
parser.add_option("-n", "--name", help="Pulsar name or number (or \"all\")", \
default="all", type=str)
parser.add_option("-c", "--corner", help="Plot corner (0 - no corner, 1 - \
corner, 2 - chainconsumer), ", default=0, type=int)
parser.add_option("-p", "--par", help="Include only model parameters that \
contain \"par\" (more than one could be added)",
action="append", default=None, type=str)
parser.add_option("-a", "--chains", help="Plot chains (1/0)", \
default=0, type=int)
parser.add_option("-b", "--logbf", help="Display log Bayes factors (1/0)", \
default=0, type=int)
parser.add_option("-f", "--noisefiles", help="Make noisefiles (1/0)", \
default=0, type=int)
parser.add_option("-l", "--credlevels", help="Credible levels (1/0)", \
default=0, type=int)
parser.add_option("-m", "--covm", help="Collect PTMCMCSampler covariance \
matrices (1/0)", default=0, type=int)
parser.add_option("-u", "--separate_earliest", help="Separate the first MCMC \
samples (fraction). Optional: add --par to also separate \
the chain with only --par columns.", default=0., type=float)
parser.add_option("-s", "--load_separated", help="Attempt to load separated \
chain files with names chain_DATETIME(14-symb)_PARS.txt. \
If --par are supplied, load only files with --par \
columns.", default=0, type=int)
parser.add_option("-o", "--optimal_statistic", help="Calculate optimal \
statistic and make key plots (1/0)", default = 0,
type = int)
parser.add_option("-g", "--optimal_statistic_orfs", help = "Set overlap \
reduction function form for optimal statistic analysis. \
Allowed options: \
all, hd (Hellings-Downs), quadrupole, dipole, monopole",
default = "hd,dipole,monopole", type = str)
parser.add_option("-N", "--optimal_statistic_nsamples", help = "Set integer \
number of samples for noise-marginalised optimal statistic \
analysis.",
default = 1000, type = int)
parser.add_option("-L", "--load_optimal_statistic_results", help = "load \
results from optimal statistic analysis. Do not recalculate\
any results. (1/0)",
default = 0, type = int)
parser.add_option("-y", "--bilby", help="Load bilby result", \
default=0, type=int)
parser.add_option("-P", "--custom_models_py", help = "Full path to a .py \
file with custom enterprise_warp model object, derived \
from enterprise_warp.StandardModels. It is only needed to \
correctly load a parameter file with unknown parameters. \
An alteriative: just use full path for --result, not \
the parameter file.",
default = None, type = str)
parser.add_option("-M", "--custom_models", help = "Name of the custom \
enterprise_warp model object in --custom_models_py.",
default = None, type = str)
opts, args = parser.parse_args()
return opts
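# Typical invocation (illustrative; the actual entry-point name depends on how the
# package exposes this module):
#   python results.py --result /path/to/output_dir/ --noisefiles 1 --corner 1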
def get_HD_curve(zeta):
coszeta = np.cos(zeta)
xip = (1.-coszeta) / 2.
HD = 3.*( 1./3. + xip * ( np.log(xip) -1./6.) )
return HD/2.0
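# get_HD_curve above is the expected Hellings-Downs cross-correlation for a pulsar
# pair separated by angle zeta (radians), normalised so that the zero-separation
# limit equals 0.5; e.g. get_HD_curve(np.pi / 3.) evaluates it at 60 degrees.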
def get_dipole_curve(zeta):
coszeta = np.cos(zeta)
return coszeta
def get_monopole_curve(zeta):
return zeta * 0.0
def dist_mode_position(values, nbins=50):
"""
Parameters
----------
values: float
Values of a distribution
method: int
Approximating a distribution with a histogram with this number of bins
Returns
-------
value : float
Position of the largest frequency bin
"""
nb, bins, patches = plt.hist(values, bins=nbins)
plt.close()
return bins[np.argmax(nb)]
def suitable_estimator(levels, errorbars_cdf = [16,84]):
"""
Returns maximum-posterior value (posterior mode position) if it is within
credible levels, otherwise returns 50%-CDF value.
The function complements estimate_from_distribution().
"""
if levels['maximum'] < levels[str(errorbars_cdf[1])] and \
levels['maximum'] > levels[str(errorbars_cdf[0])]:
return levels['maximum'], 'maximum'
else:
return levels['50'], '50'
def estimate_from_distribution(values, method='mode', errorbars_cdf = [16,84]):
"""
Return estimate of a value from a distribution (i.e., an MCMC posterior)
Parameters
----------
values: float
Values of a distribution
method: str
Currently available: mode or median
Returns
-------
value : float
Position of a mode or a median of a distribution, along the "values" axis
"""
if method == 'median':
return np.median(values)
elif method == 'mode':
return dist_mode_position(values)
elif method == 'credlvl':
levels = dict()
levels['median'] = np.median(values)
levels['maximum'] = dist_mode_position(values)
levels[str(errorbars_cdf[0])] = \
np.percentile(values, errorbars_cdf[0], axis=0)
levels[str(errorbars_cdf[1])] = \
np.percentile(values, errorbars_cdf[1], axis=0)
levels[str(50)] = np.percentile(values, 50, axis=0)
return levels
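# Illustrative usage (synthetic samples; the numbers are assumptions):
#   samples = np.random.normal(loc=-14.0, scale=0.2, size=10000)
#   estimate_from_distribution(samples, method='mode')     # ~ -14.0 (highest histogram bin)
#   estimate_from_distribution(samples, method='credlvl')  # dict with 'median', 'maximum', '16', '50', '84'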
def make_noise_dict(psrname, chain, pars, method='mode', suffix = 'noise', \
outdir = 'noisefiles/', recompute = True):
"""
Create noise dictionary for a given MCMC or nested sampling chain.
This is a dict that assigns a characteristic value (mode/median)
to a parameter from the distribution of parameter values in a chain.
Can be used for outputting a noise file or for use in further
analysis (e.g. optimal statistic)
"""
result_filename = outdir + '/' + psrname + '_' + suffix + '.json'
if not recompute:
if os.path.exists(result_filename):
      xx = json.load(open(result_filename, 'r'))
      return xx
xx = {}
for ct, par in enumerate(pars):
xx[par] = estimate_from_distribution(chain[:,ct], method=method)
return xx
def make_noise_files(psrname, chain, pars, outdir='noisefiles/',
method='mode', suffix='noise'):
"""
Create noise files from a given MCMC or nested sampling chain.
Noise file is a dict that assigns a characteristic value (mode/median)
to a parameter from the distribution of parameter values in a chain.
"""
xx = make_noise_dict(psrname, chain, pars, method = method)
os.system('mkdir -p {}'.format(outdir))
with open(outdir + '/' + psrname + '_' + suffix + '.json', 'w') as fout:
json.dump(xx, fout, sort_keys=True, indent=4, separators=(',', ': '))
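# The resulting noisefile is a flat JSON mapping parameter names to point estimates,
# for example (parameter names and values are illustrative only):
#   {"J0437-4715_efac": 1.02, "J0437-4715_red_noise_log10_A": -14.3}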
def check_if_psr_dir(folder_name):
"""
Check if the folder name (without path) is in the enterprise_warp format:
integer, underscore, pulsar name.
"""
return bool(re.match(r'^\d{1,}_[J,B]\d{2,4}[+,-]\d{4,4}[A,B]{0,1}$',
folder_name))
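# Examples (illustrative):
#   check_if_psr_dir('0_J0437-4715')   -> True
#   check_if_psr_dir('noisefiles')     -> False
#   check_if_psr_dir('12_B1937+21')    -> False (the final digit group must have 4 digits)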
class OptimalStatisticResult(object):
def __init__(self, OptimalStatistic, params, xi, rho, sig, OS, OS_err):
self.OptimalStatistic = OptimalStatistic #OptimalStatistic object
self.params = params #optimal statistic parameters
self.xi = xi
self.rho = rho
self.sig = sig
self.OS = OS #the actual A^2 optimal statistic
self.OS_err = OS_err #optimal statistic error
def add_marginalised(self, marginalised_os, marginalised_os_err):
self.marginalised_os = marginalised_os
self.marginalised_os_err = marginalised_os_err
def weightedavg(self, _rho, _sig):
weights, avg = 0., 0.
for r,s in zip(_rho, _sig):
weights += 1./(s*s)
avg += r/(s*s)
return avg/weights, np.sqrt(1./weights)
#return np.average(_rho, _sig**-2.0), np.sqrt(np.sum(_sig**-2.0))
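  # weightedavg() is the usual inverse-variance weighted mean: with equal
  # uncertainties it reduces to the arithmetic mean, e.g. weightedavg([1., 3.], [1., 1.])
  # returns (2.0, sqrt(0.5)).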
def bin_crosscorr(self, zeta):
idx = np.argsort(self.xi)
xi_sorted = self.xi[idx]
rho_sorted = self.rho[idx]
sig_sorted = self.sig[idx]
rho_avg, sig_avg = np.zeros(len(zeta)), np.zeros(len(zeta))
for i,z in enumerate(zeta[:-1]):
_rhos, _sigs = [], []
      for x,r,s in zip(xi_sorted, rho_sorted, sig_sorted):
if x >= z and x < (z+10.):
_rhos.append(r)
_sigs.append(s)
rho_avg[i], sig_avg[i] = self.weightedavg(_rhos, _sigs)
return rho_avg, sig_avg
def avg_ostat_bins(self, n_psr):
# sort the cross-correlations by xi
idx = np.argsort(self.xi)
xi_sorted = self.xi[idx]
rho_sorted = self.rho[idx]
sig_sorted = self.sig[idx]
# bin the cross-correlations so that there are the same number of \
#pairs per bin
# n_psr = len(self.psrs)
npairs = int(n_psr*(n_psr - 1.0)/2.0)
xi_avg = []
xi_err = []
rho_avg = []
sig_avg = []
i = 0
while i < len(xi_sorted):
xi_avg.append(np.mean(xi_sorted[i:int(npairs/8)+i]))
xi_err.append(np.std(xi_sorted[i:int(npairs/8)+i]))
r, s = self.weightedavg(rho_sorted[i:int(npairs/8)+i], \
sig_sorted[i:npairs+i])
rho_avg.append(r)
sig_avg.append(s)
i += int(npairs/8)
xi_avg = np.array(xi_avg)
xi_err = np.array(xi_err)
rho_avg = np.array(rho_avg)
sig_avg = np.array(sig_avg)
#do we want to return these or add them as class attributes?
self.xi_avg = xi_avg
self.xi_err = xi_err
self.rho_avg = rho_avg
self.sig_avg = sig_avg
#return xi_mean, xi_err, rho_avg, sig_avg
class EnterpriseWarpResult(object):
def __init__(self, opts, custom_models_obj=None):
self.opts = opts
self.custom_models_obj = custom_models_obj
self.interpret_opts_result()
self.get_psr_dirs()
def main_pipeline(self):
self._reset_covm()
for psr_dir in self.psr_dirs:
self.psr_dir = psr_dir
success = self._scan_psr_output()
if not success:
continue
self._get_covm()
if not (self.opts.noisefiles or self.opts.logbf or self.opts.corner or \
self.opts.chains):
continue
success = self.load_chains()
if not success:
continue
self._separate_earliest()
self._make_noisefiles()
self._get_credible_levels()
self._print_logbf()
self._make_corner_plot()
self._make_chain_plot()
self._save_covm()
def _scan_psr_output(self):
self.outdir = self.outdir_all + '/' + self.psr_dir + '/'
    if self.opts.name != 'all' and self.opts.name not in self.psr_dir:
return False
print('Processing ', self.psr_dir)
self.get_pars()
self.get_chain_file_name()
return True
def interpret_opts_result(self):
""" Determine output directory from the --results argument """
if os.path.isdir(self.opts.result):
self.outdir_all = self.opts.result
elif os.path.isfile(self.opts.result):
self.params = enterprise_warp.Params(self.opts.result, \
init_pulsars=False, \
custom_models_obj=self.custom_models_obj)
self.outdir_all = self.params.out + self.params.label_models + '_' + \
self.params.paramfile_label + '/'
else:
raise ValueError('--result seems to be neither a file, not a directory')
def get_psr_dirs(self):
""" Check if we need to loop over pulsar directories, or not """
out_subdirs = np.array(os.listdir(self.outdir_all))
psr_dir_mask = [check_if_psr_dir(dd) for dd in out_subdirs]
self.psr_dirs = out_subdirs[psr_dir_mask]
if self.psr_dirs.size == 0:
self.psr_dirs = np.array([''])
def get_chain_file_name(self):
if self.opts.load_separated:
outdirfiles = next(os.walk(self.outdir))[2]
self.chain_file = list()
for ff in outdirfiles:
if len(ff.split('_')) < 2: continue
timestr = ff.split('_')[1]
if self.par_out_label=='' and timestr[-4:]=='.txt':
timestr = timestr[:-4]
elif self.par_out_label!='':
pass
else:
continue
if not (timestr.isdigit() and len(timestr)==14):
continue
#if self.par_out_label=='':
# if ff.split('_')[2]!=self.par_out_label:
# continue
self.chain_file.append(self.outdir + ff)
if not self.chain_file:
self.chain_file = None
print('Could not find chain file in ',self.outdir)
else:
if os.path.isfile(self.outdir + '/chain_1.0.txt'):
self.chain_file = self.outdir + '/chain_1.0.txt'
elif os.path.isfile(self.outdir + '/chain_1.txt'):
self.chain_file = self.outdir + '/chain_1.txt'
else:
self.chain_file = None
print('Could not find chain file in ',self.outdir)
if self.opts.info and self.chain_file is not None:
print('Available chain file ', self.chain_file, '(',
int(np.round(os.path.getsize(self.chain_file)/1e6)), ' Mb)')
def get_pars(self):
self.par_out_label = '' if self.opts.par is None \
else '_'.join(self.opts.par)
if not os.path.exists(self.outdir + '/pars_' + self.par_out_label + '.txt'):
self.par_out_label = ''
if self.opts.load_separated and self.par_out_label!='':
self.pars = np.loadtxt(self.outdir + '/pars_' + self.par_out_label + \
'.txt', dtype=np.unicode_)
else:
self.pars = np.loadtxt(self.outdir + '/pars.txt', dtype=np.unicode_)
self._get_par_mask()
if self.opts.info and (self.opts.name != 'all' or self.psr_dir == ''):
print('Parameter names:')
for par in self.pars:
print(par)
def load_chains(self):
""" Loading PTMCMC chains """
if self.opts.load_separated:
self.chain = np.empty((0,len(self.pars)))
for ii, cf in enumerate(self.chain_file):
if ii==0:
self.chain = np.loadtxt(cf)
else:
self.chain = np.concatenate([self.chain, np.loadtxt(cf)])
else:
try:
self.chain = np.loadtxt(self.chain_file)
except:
print('Could not load file ', self.chain_file)
return False
if len(self.chain)==0:
print('Empty chain file in ', self.outdir)
return False
burn = int(0.25*self.chain.shape[0])
self.chain_burn = self.chain[burn:,:-4]
if 'nmodel' in self.pars:
self.ind_model = list(self.pars).index('nmodel')
self.unique, self.counts = np.unique(np.round( \
self.chain_burn[:, self.ind_model]), \
return_counts=True)
self.dict_real_counts = dict(zip(self.unique.astype(int),
self.counts.astype(float)))
else:
self.ind_model = 0
self.unique, self.counts, self.dict_real_counts = [None], None, None
return True
def _get_par_mask(self):
""" Get an array mask to select only parameters chosen with --par """
if self.opts.par is not None:
masks = list()
for pp in self.opts.par:
masks.append( [True if pp in label else False for label in self.pars] )
self.par_mask = np.sum(masks, dtype=bool, axis=0)
else:
self.par_mask = np.repeat(True, len(self.pars))
def _make_noisefiles(self):
if self.opts.noisefiles:
make_noise_files(self.psr_dir, self.chain_burn, self.pars,
outdir = self.outdir_all + '/noisefiles/')
def _get_credible_levels(self):
if self.opts.credlevels:
make_noise_files(self.psr_dir, self.chain_burn, self.pars,
outdir = self.outdir_all + '/noisefiles/',
suffix = 'credlvl', method='credlvl')
def _reset_covm(self):
self.covm = np.empty((0,0))
self.covm_pars = np.array([])
self.covm_repeating_pars = np.array([])
def _save_covm(self):
if self.opts.covm:
out_dict = {
'covm': self.covm,
'covm_pars': self.covm_pars,
'covm_repeating_pars': self.covm_repeating_pars,
}
with open(self.outdir_all+'covm_all.pkl', 'wb') as handle:
pickle.dump(out_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
df = | pd.DataFrame(self.covm, index=self.covm_pars, columns=self.covm_pars) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
# Non-Series array-like
result = df.assign(C=[4, 2.5, 2])
assert_frame_equal(result, expected)
# original is unmodified
assert_frame_equal(df, original)
result = df.assign(B=df.B / df.A)
expected = expected.drop('B', axis=1).rename(columns={'C': 'B'})
assert_frame_equal(result, expected)
# overwrite
result = df.assign(A=df.A + df.B)
expected = df.copy()
expected['A'] = [5, 7, 9]
assert_frame_equal(result, expected)
# lambda
result = df.assign(A=lambda x: x.A + x.B)
assert_frame_equal(result, expected)
def test_assign_multiple(self):
df = DataFrame([[1, 4], [2, 5], [3, 6]], columns=['A', 'B'])
result = df.assign(C=[7, 8, 9], D=df.A, E=lambda x: x.B)
expected = DataFrame([[1, 4, 7, 1, 4], [2, 5, 8, 2, 5],
[3, 6, 9, 3, 6]], columns=list('ABCDE'))
assert_frame_equal(result, expected)
def test_assign_alphabetical(self):
# GH 9818
df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
result = df.assign(D=df.A + df.B, C=df.A - df.B)
expected = DataFrame([[1, 2, -1, 3], [3, 4, -1, 7]],
columns=list('ABCD'))
assert_frame_equal(result, expected)
result = df.assign(C=df.A - df.B, D=df.A + df.B)
assert_frame_equal(result, expected)
def test_assign_bad(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
# non-keyword argument
with tm.assertRaises(TypeError):
df.assign(lambda x: x.A)
with | tm.assertRaises(AttributeError) | pandas.util.testing.assertRaises |
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_series_equal
from src.contact_models.contact_model_functions import _draw_nr_of_contacts
from src.contact_models.contact_model_functions import _draw_potential_vacation_contacts
from src.contact_models.contact_model_functions import (
_identify_ppl_affected_by_vacation,
)
from src.contact_models.contact_model_functions import (
calculate_non_recurrent_contacts_from_empirical_distribution,
)
from src.contact_models.contact_model_functions import go_to_daily_work_meeting
from src.contact_models.contact_model_functions import go_to_weekly_meeting
from src.contact_models.contact_model_functions import meet_daily_other_contacts
from src.contact_models.contact_model_functions import reduce_contacts_on_condition
from src.shared import draw_groups
@pytest.fixture
def params():
params = pd.DataFrame()
params["category"] = ["work_non_recurrent"] * 2 + ["other_non_recurrent"] * 2
params["subcategory"] = [
"symptomatic_multiplier",
"positive_test_multiplier",
] * 2
params["name"] = ["symptomatic_multiplier", "positive_test_multiplier"] * 2
params["value"] = [0.0, 0.0, 0.0, 0.0]
params.set_index(["category", "subcategory", "name"], inplace=True)
return params
@pytest.fixture
def states():
"""states DataFrame for testing purposes.
Columns:
- date: 2020-04-01 - 2020-04-30
- id: 50 individuals, with 30 observations each. id goes from 0 to 49.
- immune: bool
- infectious: bool
- age_group: ordered Categorical, either 10-19 or 40-49.
- region: unordered Categorical, ['Overtjssel', 'Drenthe', 'Gelderland']
- n_has_infected: int, 0 to 3.
- cd_infectious_false: int, -66 to 8.
- occupation: Categorical. "working" or "in school".
- cd_symptoms_false: int, positive for the first 20 individuals, negative after.
"""
this_modules_path = Path(__file__).resolve()
states = pd.read_parquet(this_modules_path.parent / "1.parquet")
old_to_new = {old: i for i, old in enumerate(sorted(states["id"].unique()))}
states["id"].replace(old_to_new, inplace=True)
states["age_group"] = pd.Categorical(
states["age_group"], ["10 - 19", "40 - 49"], ordered=True
)
states["age_group"] = states["age_group"].cat.rename_categories(
{"10 - 19": "10-19", "40 - 49": "40-49"}
)
states["region"] = pd.Categorical(
states["region"], ["Overtjssel", "Drenthe", "Gelderland"], ordered=False
)
states["date"] = pd.to_datetime(states["date"], format="%Y-%m-%d", unit="D")
states["n_has_infected"] = states["n_has_infected"].astype(int)
states["cd_infectious_false"] = states["cd_infectious_false"].astype(int)
states["occupation"] = states["age_group"].replace(
{"10-19": "in school", "40-49": "working"}
)
states["cd_symptoms_false"] = list(range(1, 21)) + list(range(-len(states), -20))
states["symptomatic"] = states["cd_symptoms_false"] >= 0
states["knows_infectious"] = False
states["knows_immune"] = False
states["cd_received_test_result_true"] = -100
states["knows_currently_infected"] = states.eval(
"knows_infectious | (knows_immune & symptomatic) "
"| (knows_immune & (cd_received_test_result_true >= -13))"
)
states["quarantine_compliance"] = 1.0
return states
@pytest.fixture
def a_thursday(states):
a_thursday = states[states["date"] == "2020-04-30"].copy()
a_thursday["cd_symptoms_false"] = list(range(1, 21)) + list(
range(-len(a_thursday), -20)
)
a_thursday["symptomatic"] = a_thursday["cd_symptoms_false"] >= 0
a_thursday["work_recurrent_weekly"] = draw_groups(
df=a_thursday,
query="occupation == 'working'",
assort_bys=["region"],
n_per_group=20,
seed=484,
)
return a_thursday
@pytest.fixture
def no_reduction_params():
params = pd.DataFrame()
params["subcategory"] = ["symptomatic_multiplier", "positive_test_multiplier"]
params["name"] = params["subcategory"]
params["value"] = 1.0
params = params.set_index(["subcategory", "name"])
return params
# ----------------------------------------------------------------------------
def test_go_to_weekly_meeting_wrong_day(a_thursday):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
contact_params = pd.DataFrame()
group_col_name = "group_col"
day_of_week = "Saturday"
seed = 3931
res = go_to_weekly_meeting(
a_thursday, contact_params, group_col_name, day_of_week, seed
)
expected = pd.Series(False, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False)
def test_go_to_weekly_meeting_right_day(a_thursday, no_reduction_params):
a_thursday["group_col"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (len(a_thursday) - 7)
res = go_to_weekly_meeting(
states=a_thursday,
params=no_reduction_params,
group_col_name="group_col",
day_of_week="Thursday",
seed=3931,
)
expected = pd.Series(False, index=a_thursday.index)
expected[:7] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekend(states, no_reduction_params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["work_saturday"] = [True, True] + [False] * (len(a_saturday) - 2)
a_saturday["work_daily_group_id"] = 333
res = go_to_daily_work_meeting(a_saturday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_saturday.index)
expected[:2] = True
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday(a_thursday, no_reduction_params):
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3] + [-1] * (
len(a_thursday) - 7
)
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
# not every one we assigned a group id is a worker
expected.iloc[:7] = [True, True, False, True, True, False, True]
assert_series_equal(res, expected, check_names=False)
def test_go_to_daily_work_meeting_weekday_with_reduction(
a_thursday, no_reduction_params
):
reduction_params = no_reduction_params
reduction_params["value"] = 0.0
a_thursday["work_daily_group_id"] = [1, 2, 1, 2, 3, 3, 3, 3, 3] + [-1] * (
len(a_thursday) - 9
)
a_thursday.loc[1450:1458, "symptomatic"] = [
False,
False,
False,
False,
True,
False,
False,
False,
False,
]
res = go_to_daily_work_meeting(a_thursday, no_reduction_params, seed=None)
expected = pd.Series(False, index=a_thursday.index)
# not every one we assigned a group id is a worker
expected[:9] = [True, True, False, True, False, False, True, False, True]
assert_series_equal(res, expected, check_names=False)
# --------------------------- Non Recurrent Contact Models ---------------------------
def test_non_recurrent_work_contacts_weekend(states, params):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")]
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=494,
)
assert_series_equal(res, pd.Series(data=0, index=a_saturday.index, dtype=float))
@pytest.fixture
def params_with_positive():
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
)
params = params.set_index(["category", "subcategory", "name"])
return params
def test_non_recurrent_work_contacts_no_random_no_sick(
a_thursday, params_with_positive
):
a_thursday["symptomatic"] = False
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=433,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_no_sick_sat(
states, params_with_positive
):
a_saturday = states[states["date"] == pd.Timestamp("2020-04-04")].copy()
a_saturday["symptomatic"] = False
a_saturday["participates_saturday"] = [True, True, True] + [False] * (
len(a_saturday) - 3
)
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_saturday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends="participates",
query="occupation == 'working'",
seed=433,
)
expected = pd.Series(0, index=a_saturday.index)
expected[:2] = 2
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_no_random_with_sick(
a_thursday, params_with_positive
):
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params_with_positive.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=448,
)
expected = a_thursday["age_group"].replace({"10-19": 0.0, "40-49": 2.0})
expected[:20] = 0.0
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_work_contacts_random_with_sick(a_thursday):
np.random.seed(77)
params = pd.DataFrame.from_dict(
{
"category": ["work_non_recurrent"] * 4,
"subcategory": ["all"] * 2
+ ["symptomatic_multiplier", "positive_test_multiplier"],
"name": [
3,
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [0.5, 0.5, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["work_non_recurrent"],
on_weekends=False,
query="occupation == 'working'",
seed=338,
)
assert (res[:20] == 0).all() # symptomatics
assert (res[a_thursday["occupation"] != "working"] == 0).all() # non workers
healthy_workers = (a_thursday["occupation"] == "working") & (
a_thursday["cd_symptoms_false"] < 0
)
assert res[healthy_workers].isin([2, 3]).all()
# ------------------------------------------------------------------------------------
def test_non_recurrent_other_contacts_no_random_no_sick(a_thursday):
a_thursday["symptomatic"] = False
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=334,
)
expected = pd.Series(data=2, index=a_thursday.index)
assert_series_equal(res, expected, check_names=False, check_dtype=False)
def test_non_recurrent_other_contacts_no_random_with_sick(a_thursday):
params = pd.DataFrame.from_dict(
{
"category": ["other_non_recurrent"] * 3,
"subcategory": [
"all",
"symptomatic_multiplier",
"positive_test_multiplier",
],
"name": [
2,
"symptomatic_multiplier",
"positive_test_multiplier",
], # nr of contacts
"value": [1.0, 0.0, 0.0], # probability
}
).set_index(["category", "subcategory", "name"])
res = calculate_non_recurrent_contacts_from_empirical_distribution(
states=a_thursday,
params=params.loc["other_non_recurrent"],
on_weekends=True,
query=None,
seed=332,
)
expected = pd.Series(data=2, index=a_thursday.index)
expected[:20] = 0
| assert_series_equal(res, expected, check_names=False, check_dtype=False) | pandas.testing.assert_series_equal |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
DatetimeIndex,
Series,
concat,
isna,
notna,
)
import pandas._testing as tm
import pandas.tseries.offsets as offsets
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_series(series, compare_func, roll_func, kwargs):
result = getattr(series.rolling(50), roll_func)(**kwargs)
assert isinstance(result, Series)
tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
pytest.param(
lambda x: np.isfinite(x).astype(float).sum(),
"count",
{},
marks=pytest.mark.filterwarnings("ignore:min_periods:FutureWarning"),
),
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_frame(raw, frame, compare_func, roll_func, kwargs):
result = getattr(frame.rolling(50), roll_func)(**kwargs)
assert isinstance(result, DataFrame)
tm.assert_series_equal(
result.iloc[-1, :],
frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_series(series, compare_func, roll_func, kwargs, minp):
win = 25
ser = series[::2].resample("B").mean()
series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = series[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], compare_func(trunc_series))
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs, minp",
[
[np.mean, "mean", {}, 10],
[np.nansum, "sum", {}, 10],
[lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],
[np.median, "median", {}, 10],
[np.min, "min", {}, 10],
[np.max, "max", {}, 10],
[lambda x: np.std(x, ddof=1), "std", {}, 10],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],
[lambda x: np.var(x, ddof=1), "var", {}, 10],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],
],
)
def test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):
win = 25
frm = frame[::2].resample("B").mean()
frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(
**kwargs
)
last_date = frame_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_frame = frame[::2].truncate(prev_date, last_date)
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(compare_func, raw=raw),
check_names=False,
)
@pytest.mark.parametrize(
"compare_func, roll_func, kwargs",
[
[np.mean, "mean", {}],
[np.nansum, "sum", {}],
[np.median, "median", {}],
[np.min, "min", {}],
[np.max, "max", {}],
[lambda x: np.std(x, ddof=1), "std", {}],
[lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],
[lambda x: np.var(x, ddof=1), "var", {}],
[lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],
],
)
def test_nans(compare_func, roll_func, kwargs):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)
tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))
# min_periods is working correctly
result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(np.random.randn(20))
result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if roll_func != "sum":
result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)
result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)
tm.assert_almost_equal(result0, result1)
def test_nans_count():
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = obj.rolling(50, min_periods=30).count()
tm.assert_almost_equal(
result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()
)
@pytest.mark.parametrize(
"roll_func, kwargs",
[
["mean", {}],
["sum", {}],
["median", {}],
["min", {}],
["max", {}],
["std", {}],
["std", {"ddof": 0}],
["var", {}],
["var", {"ddof": 0}],
],
)
@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(series, minp, roll_func, kwargs):
result = getattr(series.rolling(len(series) + 1, min_periods=minp), roll_func)(
**kwargs
)
expected = getattr(series.rolling(len(series), min_periods=minp), roll_func)(
**kwargs
)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
def test_min_periods_count(series):
result = series.rolling(len(series) + 1, min_periods=0).count()
expected = series.rolling(len(series), min_periods=0).count()
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
@pytest.mark.parametrize(
"roll_func, kwargs, minp",
[
["mean", {}, 15],
["sum", {}, 15],
["count", {}, 0],
["median", {}, 15],
["min", {}, 15],
["max", {}, 15],
["std", {}, 15],
["std", {"ddof": 0}, 15],
["var", {}, 15],
["var", {"ddof": 0}, 15],
],
)
def test_center(roll_func, kwargs, minp):
obj = Series(np.random.randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(
**kwargs
)
expected = getattr(
concat([obj, Series([np.NaN] * 9)]).rolling(20, min_periods=minp), roll_func
)(**kwargs)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
series_xp = (
getattr(
series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(series.index)
)
series_rs = getattr(
series.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
@pytest.mark.parametrize(
"roll_func, kwargs, minp, fill_value",
[
["mean", {}, 10, None],
["sum", {}, 10, None],
["count", {}, 0, 0],
["median", {}, 10, None],
["min", {}, 10, None],
["max", {}, 10, None],
["std", {}, 10, None],
["std", {"ddof": 0}, 10, None],
["var", {}, 10, None],
["var", {"ddof": 0}, 10, None],
],
)
def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
# shifter index
s = [f"x{x:d}" for x in range(12)]
frame_xp = (
getattr(
frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),
roll_func,
)(**kwargs)
.shift(-12)
.reindex(frame.index)
)
frame_rs = getattr(
frame.rolling(window=25, min_periods=minp, center=True), roll_func
)(**kwargs)
if fill_value is not None:
frame_xp = frame_xp.fillna(fill_value)
tm.assert_frame_equal(frame_xp, frame_rs)
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(quantile=0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_rolling_functions_window_non_shrinkage(f):
# GH 7764
s = Series(range(4))
s_expected = Series(np.nan, index=s.index)
df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
s_result = f(s)
tm.assert_series_equal(s_result, s_expected)
df_result = f(df)
tm.assert_frame_equal(df_result, df_expected)
def test_rolling_max_gh6297():
"""Replicate result expected in GH #6297"""
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 2 datapoints on one of the days
indices.append(datetime(1975, 1, 3, 6, 0))
series = Series(range(1, 7), index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
expected = Series(
[1.0, 2.0, 6.0, 4.0, 5.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_max_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be max
expected = Series(
[0.0, 1.0, 2.0, 3.0, 20.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").max().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify median (10.0)
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).max()
tm.assert_series_equal(expected, x)
# Now specify mean (4+10+20)/3
v = (4.0 + 10.0 + 20.0) / 3.0
expected = Series(
[0.0, 1.0, 2.0, 3.0, v],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").mean().rolling(window=1).max()
tm.assert_series_equal(expected, x)
def test_rolling_min_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be min
expected = Series(
[0.0, 1.0, 2.0, 3.0, 4.0],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
r = series.resample("D").min().rolling(window=1)
tm.assert_series_equal(expected, r.min())
def test_rolling_median_resample():
indices = [datetime(1975, 1, i) for i in range(1, 6)]
# So that we can have 3 datapoints on last day (4, 10, and 20)
indices.append(datetime(1975, 1, 5, 1))
indices.append(datetime(1975, 1, 5, 2))
series = Series(list(range(0, 5)) + [10, 20], index=indices)
# Use floats instead of ints as values
series = series.map(lambda x: float(x))
# Sort chronologically
series = series.sort_index()
# Default how should be median
expected = Series(
[0.0, 1.0, 2.0, 3.0, 10],
index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
)
x = series.resample("D").median().rolling(window=1).median()
tm.assert_series_equal(expected, x)
def test_rolling_median_memory_error():
# GH11722
n = 20000
Series(np.random.randn(n)).rolling(window=2, center=False).median()
Series(np.random.randn(n)).rolling(window=2, center=False).median()
@pytest.mark.parametrize(
"data_type",
[np.dtype(f"f{width}") for width in [4, 8]]
+ [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
# GH12373
# Just testing that these don't throw exceptions and that
# the return type is float64. Other tests will cover quantitative
# correctness
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
assert result.dtypes[0] == np.dtype("f8")
result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
assert result.dtypes[0] == np.dtype("f8")
@pytest.mark.parametrize(
"f",
[
lambda x: x.rolling(window=10, min_periods=0).count(),
lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
lambda x: x.rolling(window=10, min_periods=5).max(),
lambda x: x.rolling(window=10, min_periods=5).min(),
lambda x: x.rolling(window=10, min_periods=5).sum(),
lambda x: x.rolling(window=10, min_periods=5).mean(),
lambda x: x.rolling(window=10, min_periods=5).std(),
lambda x: x.rolling(window=10, min_periods=5).var(),
lambda x: x.rolling(window=10, min_periods=5).skew(),
lambda x: x.rolling(window=10, min_periods=5).kurt(),
lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
lambda x: x.rolling(window=10, min_periods=5).median(),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
pytest.param(
lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
marks=td.skip_if_no_scipy,
),
],
)
def test_moment_functions_zero_length(f):
# GH 8056
s = Series(dtype=np.float64)
s_expected = s
df1 = DataFrame()
df1_expected = df1
df2 = | DataFrame(columns=["a"]) | pandas.DataFrame |
from pathlib import Path
from geoid.acs import Puma
import pandas as pd
from geoid.censusnames import stusab
from .acs import build_acs
from .pums import build_pums_households
hhc_rac_cols = [f"b11001{rc}_001" for rc in list('abcdefg')]
hhc_eth_cols = [f"b11001{rc}_001" for rc in list('hi')]
def agg_inc_marginals(marginals):
rows = [c for c in marginals.index if c.startswith('b19025')]
return marginals.loc[rows]
def marginals_sum(marginals):
return pd.DataFrame({
'puma': marginals.iloc[:, 0],
'tracts': marginals.iloc[:, 1:].sum(axis=1)
})
def marginals_diff(marginals):
return ((marginals.iloc[:, 1:].sum(axis=1) - marginals.iloc[:, 0]) / marginals.iloc[:, 0])
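# marginals_sum/marginals_diff above compare each puma-level marginal against the sum
# of its tract-level columns; marginals_diff returns the relative discrepancy per marginal.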
# Fill in missing values in the aggregate income for the puma
def fill_puma_income_marginals(marginals, cols, total_col='b19025_001'):
"""Fill in missing aggregated income marginals for the puma"""
missing_count = marginals.loc[cols, 'puma'].isnull().sum()
if missing_count == 0:
return
# Leftover from the total aggregate
agg_inc_left = marginals.loc[total_col, 'puma'] - marginals.loc[cols, 'puma'].sum()
if pd.isna(agg_inc_left):
# There are no value puma marginals. Rare, so punt by just copying over the
# tracts.
tracts = list(marginals)[1:]
marginals.loc[cols, 'puma'] = marginals.loc[cols, tracts].sum(axis=1)
return
# Divide the difference over the missing records, then fill them in
filled_puma = marginals.loc[cols, 'puma'].fillna((agg_inc_left / missing_count).astype('Int64'))
# Check that the filed series is equal to the total
if total_col == total_col:
assert (filled_puma.sum() - marginals.loc[total_col, 'puma']).round(-1) == 0
marginals.loc[cols, 'puma'] = filled_puma
def fill_tract_income_marginals(marginals, tract_geoid, cols, total_col='b19025_001'):
"""Fill in missing income marginals for a tract, based on the proportion of the
income each race holds in the puma"""
tract_agg_income = marginals.loc[cols, tract_geoid]
agg_income = marginals.loc[total_col, tract_geoid] # Aggregate income for the tract
if pd.isna(agg_income):
return
race_agg_income = marginals.loc[cols, tract_geoid]
tract_agg_income.sum() / marginals.loc[total_col, 'puma']
missing = tract_agg_income.isnull()
missing_idx = tract_agg_income.index[missing]
left_over = agg_income - race_agg_income[~missing].sum() # Tract agg income not accounted for
# What portion of the missing income should we allocate to each of the
# missing race entries?
puma_missing_inc = marginals.loc[missing_idx, 'puma']
missing_prop = puma_missing_inc / puma_missing_inc.sum()
try:
marginals.loc[missing_idx, tract_geoid] = (missing_prop * left_over).round(0).astype('Int64')
except ValueError:
# Too many nans, so just punt and fill them in with zeros
marginals.loc[missing_idx, tract_geoid] = 0
# Check that the result is close enough. THis only works for the race columns, not the
# eth columns, although the eth columns will be really close. For the eth columns, nhwites+hispanics
# will be larger than whites, because hispanics includes some non-whites.
if total_col == 'b19025_001':
# print (marginals.loc[cols, tract_geoid].sum(), marginals.loc[total_col, tract_geoid])
assert (marginals.loc[cols, tract_geoid].sum() - marginals.loc[total_col, tract_geoid]).round(-1) == 0
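# Worked example of the allocation above (numbers are made up): if a tract reports a
# total aggregate income of 100 but only 70 is accounted for by races with known values,
# and the puma-level aggregates for the two missing races are 20 and 10, the leftover 30
# is split 2:1 between them, i.e. 20 and 10.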
# marginals = make_marginals_frame(pums_acs, tract_acs, puma_geoid)
def make_marginals_frame(pums_acs, tract_acs, puma_geoid, m90=False, use_puma_geoid=False):
"""Make a single marginals dataframe, which has the marginals for the
puma and all of the tracts in the puma"""
pacs = pums_acs.loc[puma_geoid]
tacs = tract_acs[tract_acs.puma == puma_geoid]
m_cols = [c for c in tacs.columns if '_m90' in c and c.startswith('b')]
est_cols = [c for c in tacs.columns if '_m90' not in c and c.startswith('b')]
if m90:
cols = m_cols
inc_rac_cols = [f"b19025{rc}_001_m90" for rc in list('abcdefg')]
inc_eth_cols = [f"b19025{rc}_001_m90" for rc in list('hi')]
total_col = 'b19025_001_m90'
total_col_a = 'b19025a_001_m90'
else:
cols = est_cols
inc_rac_cols = [f"b19025{rc}_001" for rc in list('abcdefg')]
inc_eth_cols = [f"b19025{rc}_001" for rc in list('hi')]
total_col = 'b19025_001'
total_col_a = 'b19025a_001'
marginals = pacs.loc[cols].to_frame('puma').join(tacs[cols].T)
try:
fill_puma_income_marginals(marginals, inc_rac_cols, total_col)
except Exception:
#marginals.loc[inc_rac_cols, 'puma'] = marginals.loc[inc_rac_cols, tracts].sum(axis=1)
raise
try:
fill_puma_income_marginals(marginals, inc_eth_cols, total_col_a)
    except Exception:
raise
for tract_geoid in list(marginals.columns)[1:]:
fill_tract_income_marginals(marginals, tract_geoid, inc_rac_cols, total_col)
fill_tract_income_marginals(marginals, tract_geoid, inc_eth_cols, total_col_a)
if use_puma_geoid:
marginals = marginals.stack().to_frame('est')
marginals.index.names = ['marginal', 'region']
marginals.insert(0, 'puma', puma_geoid)
marginals.set_index('puma', inplace=True, append=True)
marginals = marginals.reorder_levels(['puma', 'marginal', 'region'])
marginals = marginals.rename(index=lambda v: v.replace('_m90',''))
return marginals
def make_state_marginals(state, year, release):
pums_acs = build_acs(state, sl='puma', year=year, release=release)
tract_acs = build_acs(state, sl='tract', year=year, release=release, add_puma=True)
f = [make_marginals_frame(pums_acs, tract_acs, puma_geoid, use_puma_geoid=True) for puma_geoid in pums_acs.index]
return pd.DataFrame(pd.concat(f)) # extra DataFrame to convert from CensusDataFrame
def hdf_path(path, year, release):
return Path(path).joinpath(f'synpums-source-{year}-{release}.hdf')
def write_source_data(path, year, release, cb=None, delete=False):
"""Build the households, puma and tract data for all states abd write them to an HDF file"""
fn = hdf_path(path, year, release)
if fn.exists() and delete:
fn.unlink()
if not fn.parent.exists():
fn.parent.mkdir(parents=True)
if fn.exists():
s = | pd.HDFStore(fn) | pandas.HDFStore |
import os
import pickle
import datetime
from dateutil.relativedelta import relativedelta
import yaml
import pandas as pd
import sqlalchemy
import RecallAdjuster as ra
NUM_TRIALS = 10
base = datetime.datetime.strptime('2018-04-01', '%Y-%m-%d')
date_pairs = []
for x in range(9,-1,-1):
date_pairs.append(
(
(base - relativedelta(months=4*x) - relativedelta(years=1)).strftime('%Y-%m-%d'),
(base - relativedelta(months=4*x) - relativedelta(years=1)).strftime('%Y-%m-%d')
)
)
date_pairs.append(
(
(base - relativedelta(months=4*x) - relativedelta(years=1)).strftime('%Y-%m-%d'),
(base - relativedelta(months=4*x)).strftime('%Y-%m-%d')
)
)
def connect(poolclass=sqlalchemy.pool.QueuePool):
with open(os.path.join(os.path.join('../..', 'config'), 'db_default_profile.yaml')) as fd:
        config = yaml.safe_load(fd)
dburl = sqlalchemy.engine.url.URL(
"postgres",
host=config["host"],
username=config["user"],
database=config["db"],
password=config["pass"],
port=config["port"],
)
return sqlalchemy.create_engine(dburl, poolclass=poolclass)
conn = connect()
all_fracs = []
all_ts = []
for i in range(NUM_TRIALS):
print('starting trial %s of %s...' % (i, NUM_TRIALS))
myRA = ra.RecallAdjuster(
engine=conn,
pg_role='johnson_county_ddj_write',
schema='kit_bias_class_test',
experiment_hashes='09b3bcab5a6e1eb1c712571f6a5abb75',
date_pairs=date_pairs,
list_sizes=[500],
#entity_demos='joco',
entity_demos='kit_bias_class_test.entity_demos',
demo_col='race_3way',
sample_weights={'W': 0.3, 'B': 0.6}
)
new_fracs = pd.read_sql("""
SELECT train_end_time, COUNT(*) AS num_models,
AVG(base_frac_b) AS avg_base_frac_b, AVG(base_frac_w) AS avg_base_frac_w, AVG(base_frac_h) AS avg_base_frac_h,
AVG(adj_frac_b) AS avg_adj_frac_b, AVG(adj_frac_w) AS avg_adj_frac_w, AVG(adj_frac_h) AS avg_adj_frac_h
FROM kit_bias_class_test.model_adjustment_results_race_3way
WHERE base_value >= 0.45
AND train_end_time > past_train_end_time
GROUP BY 1
ORDER BY 1 DESC;""", conn)
new_fracs['trial'] = i
all_fracs.append(new_fracs)
ts_sql = """
WITH mg_rns AS (
SELECT *,
row_number() OVER (PARTITION BY train_end_time, list_size, metric, parameter ORDER BY base_value DESC, base_max_recall_ratio ASC, RANDOM()) AS rn_base,
row_number() OVER (PARTITION BY train_end_time, list_size, metric, parameter ORDER BY adj_value DESC, adj_max_recall_ratio ASC, RANDOM()) AS rn_adj
FROM kit_bias_class_test.model_adjustment_results_race_3way
WHERE past_train_end_time = train_end_time
)
, base_mgs AS (
SELECT * FROM mg_rns WHERE rn_base = 1
)
, adj_mgs AS (
SELECT * FROM mg_rns WHERE rn_adj = 1
)
-- Simple model selection on last time period, baseline with no recall adjustments
SELECT 'Best Unadjusted Metric - Unadjusted'::VARCHAR(128) AS strategy,
r.train_end_time, r.past_train_end_time,
r.list_size, r.metric, r.parameter,
r.base_value AS value,
r.base_max_recall_ratio AS max_recall_ratio,
r.base_recall_w_to_b AS recall_w_to_b,
r.base_recall_w_to_h AS recall_w_to_h,
r.base_recall_b_to_h AS recall_b_to_h
FROM kit_bias_class_test.model_adjustment_results_race_3way r
JOIN base_mgs b
ON r.model_group_id = b.model_group_id
AND r.past_train_end_time = b.train_end_time
AND r.list_size = b.list_size
AND r.metric = b.metric
AND r.parameter = b.parameter
WHERE r.train_end_time > r.past_train_end_time
UNION ALL
-- Model selection on last time before adjustment, with adjustment applied
SELECT 'Best Unadjusted Metric - Adjusted'::VARCHAR(128) AS strategy,
r.train_end_time, r.past_train_end_time,
r.list_size, r.metric, r.parameter,
r.adj_value AS value,
r.adj_max_recall_ratio AS max_recall_ratio,
r.adj_recall_w_to_b AS recall_w_to_b,
r.adj_recall_w_to_h AS recall_w_to_h,
r.adj_recall_b_to_h AS recall_b_to_h
FROM kit_bias_class_test.model_adjustment_results_race_3way r
JOIN base_mgs b
ON r.model_group_id = b.model_group_id
AND r.past_train_end_time = b.train_end_time
AND r.list_size = b.list_size
AND r.metric = b.metric
AND r.parameter = b.parameter
WHERE r.train_end_time > r.past_train_end_time
UNION ALL
-- Model selection on last time after adjustment, with adjustment applied
SELECT 'Best Adjusted Metric - Adjusted'::VARCHAR(128) AS strategy,
r.train_end_time, r.past_train_end_time,
r.list_size, r.metric, r.parameter,
r.adj_value AS value,
r.adj_max_recall_ratio AS max_recall_ratio,
r.adj_recall_w_to_b AS recall_w_to_b,
r.adj_recall_w_to_h AS recall_w_to_h,
r.adj_recall_b_to_h AS recall_b_to_h
FROM kit_bias_class_test.model_adjustment_results_race_3way r
JOIN adj_mgs b
ON r.model_group_id = b.model_group_id
AND r.past_train_end_time = b.train_end_time
AND r.list_size = b.list_size
AND r.metric = b.metric
AND r.parameter = b.parameter
WHERE r.train_end_time > r.past_train_end_time
UNION ALL
-- Composite model
SELECT 'Composite Model - Adjusted'::VARCHAR(128) AS strategy,
future_train_end_time AS train_end_time, past_train_end_time,
list_size, metric, parameter,
value,
max_recall_ratio,
recall_w_to_b,
recall_w_to_h,
recall_b_to_h
FROM kit_bias_class_test.composite_results_race_3way
WHERE future_train_end_time > past_train_end_time
;
"""
new_ts = pd.read_sql(ts_sql, conn)
all_ts.append(new_ts)
result_dfs = {
'fracs': | pd.concat(all_fracs) | pandas.concat |
r"""
Word2Vec Model
==============
Introduces Gensim's Word2Vec model and demonstrates its use on the Lee Corpus.
"""
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
###############################################################################
# In case you missed the buzz, word2vec is widely featured as a member of the
# “new wave” of machine learning algorithms based on neural networks, commonly
# referred to as "deep learning" (though word2vec itself is rather shallow).
# Using large amounts of unannotated plain text, word2vec learns relationships
# between words automatically. The output is a set of vectors, one vector per word,
# with remarkable linear relationships that allow us to do things like:
#
# * vec("king") - vec("man") + vec("woman") =~ vec("queen")
# * vec("Montreal Canadiens") – vec("Montreal") + vec("Toronto") =~ vec("Toronto Maple Leafs").
#
# Word2vec is very useful in `automatic text tagging
# <https://github.com/RaRe-Technologies/movie-plots-by-genre>`_\ , recommender
# systems and machine translation.
#
# This tutorial:
#
# #. Introduces ``Word2Vec`` as an improvement over traditional bag-of-words
# #. Shows off a demo of ``Word2Vec`` using a pre-trained model
# #. Demonstrates training a new model from your own data
# #. Demonstrates loading and saving models
# #. Introduces several training parameters and demonstrates their effect
# #. Discusses memory requirements
# #. Visualizes Word2Vec embeddings by applying dimensionality reduction
#
# Review: Bag-of-words
# --------------------
#
# .. Note:: Feel free to skip these review sections if you're already familiar with the models.
#
# You may be familiar with the `bag-of-words model
# <https://en.wikipedia.org/wiki/Bag-of-words_model>`_ from the
# :ref:`core_concepts_vector` section.
# This model transforms each document to a fixed-length vector of integers.
# For example, given the sentences:
#
# - ``John likes to watch movies. Mary likes movies too.``
# - ``John also likes to watch football games. Mary hates football.``
#
# The model outputs the vectors:
#
# - ``[1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]``
# - ``[1, 1, 1, 1, 0, 1, 0, 1, 2, 1, 1]``
#
# Each vector has 11 elements, where each element counts the number of times a
# particular word occurred in the document.
# The order of elements is arbitrary.
# In the example above, the order of the elements corresponds to the words:
# ``["John", "likes", "to", "watch", "movies", "Mary", "too", "also", "football", "games", "hates"]``.
#
# Bag-of-words models are surprisingly effective, but have several weaknesses.
#
# First, they lose all information about word order: "John likes Mary" and
# "Mary likes John" correspond to identical vectors. There is a solution: bag
# of `n-grams <https://en.wikipedia.org/wiki/N-gram>`__
# models consider word phrases of length n to represent documents as
# fixed-length vectors to capture local word order but suffer from data
# sparsity and high dimensionality.
#
# Second, the model does not attempt to learn the meaning of the underlying
# words, and as a consequence, the distance between vectors doesn't always
# reflect the difference in meaning. The ``Word2Vec`` model addresses this
# second problem.
#
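###############################################################################
# A minimal way to reproduce the first count vector above with plain Python:
#
from collections import Counter

bow_vocabulary = ["John", "likes", "to", "watch", "movies", "Mary", "too", "also", "football", "games", "hates"]
bow_document = "John likes to watch movies . Mary likes movies too .".split()
bow_counts = Counter(bow_document)
print([bow_counts[word] for word in bow_vocabulary])  # [1, 2, 1, 1, 2, 1, 1, 0, 0, 0, 0]

###############################################################################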
# Introducing: the ``Word2Vec`` Model
# -----------------------------------
#
# ``Word2Vec`` is a more recent model that embeds words in a lower-dimensional
# vector space using a shallow neural network. The result is a set of
# word-vectors where vectors close together in vector space have similar
# meanings based on context, and word-vectors distant to each other have
# differing meanings. For example, ``strong`` and ``powerful`` would be close
# together and ``strong`` and ``Paris`` would be relatively far.
#
# There are two versions of this model, and the :py:class:`~gensim.models.word2vec.Word2Vec`
# class implements them both:
#
# 1. Skip-grams (SG)
# 2. Continuous-bag-of-words (CBOW)
#
# .. Important::
# Don't let the implementation details below scare you.
# They're advanced material: if it's too much, then move on to the next section.
#
# The `Word2Vec Skip-gram <http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model>`__
# model, for example, takes in pairs (word1, word2) generated by moving a
# window across text data, and trains a 1-hidden-layer neural network based on
# the synthetic task of predicting, for a given input word, a probability
# distribution over its nearby words. A virtual `one-hot
# <https://en.wikipedia.org/wiki/One-hot>`__ encoding of words
# goes through a 'projection layer' to the hidden layer; these projection
# weights are later interpreted as the word embeddings. So if the hidden layer
# has 300 neurons, this network will give us 300-dimensional word embeddings.
#
# Continuous-bag-of-words Word2vec is very similar to the skip-gram model. It
# is also a 1-hidden-layer neural network. The synthetic training task now uses
# the average of multiple input context words, rather than a single word as in
# skip-gram, to predict the center word. Again, the projection weights that
# turn one-hot words into averageable vectors, of the same width as the hidden
# layer, are interpreted as the word embeddings.
#
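# In code, the two flavours are selected with the ``sg`` parameter once a
# training corpus is available (see the training section below)::
#
#   model_cbow = gensim.models.Word2Vec(sentences, sg=0)  # CBOW (the default)
#   model_sg = gensim.models.Word2Vec(sentences, sg=1)    # skip-gram
#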
###############################################################################
# Word2Vec Demo
# -------------
#
# To see what ``Word2Vec`` can do, let's download a pre-trained model and play
# around with it. We will fetch the Word2Vec model trained on part of the
# Google News dataset, covering approximately 3 million words and phrases. Such
# a model can take hours to train, but since it's already available,
# downloading and loading it with Gensim takes minutes.
#
# .. Important::
# The model is approximately 2GB, so you'll need a decent network connection
# to proceed. Otherwise, skip ahead to the "Training Your Own Model" section
# below.
#
# You may also check out an `online word2vec demo
# <http://radimrehurek.com/2014/02/word2vec-tutorial/#app>`_ where you can try
# this vector algebra for yourself. That demo runs ``word2vec`` on the
# **entire** Google News dataset, of **about 100 billion words**.
#
import gensim.downloader as api
wv = api.load('word2vec-google-news-300')
###############################################################################
# A common operation is to retrieve the vocabulary of a model. That is trivial:
for i, word in enumerate(wv.vocab):
if i == 10:
break
print(word)
###############################################################################
# We can easily obtain vectors for terms the model is familiar with:
#
vec_king = wv['king']
###############################################################################
# Unfortunately, the model is unable to infer vectors for unfamiliar words.
# This is one limitation of Word2Vec: if this limitation matters to you, check
# out the FastText model.
#
try:
vec_cameroon = wv['cameroon']
except KeyError:
print("The word 'cameroon' does not appear in this model")
###############################################################################
# Moving on, ``Word2Vec`` supports several word similarity tasks out of the
# box. You can see how the similarity intuitively decreases as the words get
# less and less similar.
#
pairs = [
('car', 'minivan'), # a minivan is a kind of car
('car', 'bicycle'), # still a wheeled vehicle
('car', 'airplane'), # ok, no wheels, but still a vehicle
('car', 'cereal'), # ... and so on
('car', 'communism'),
]
for w1, w2 in pairs:
print('%r\t%r\t%.2f' % (w1, w2, wv.similarity(w1, w2)))
###############################################################################
# Print the 5 most similar words to "car" or "minivan"
print(wv.most_similar(positive=['car', 'minivan'], topn=5))
###############################################################################
# Which of the below does not belong in the sequence?
print(wv.doesnt_match(['fire', 'water', 'land', 'sea', 'air', 'car']))
###############################################################################
# Training Your Own Model
# -----------------------
#
# To start, you'll need some data for training the model. For the following
# examples, we'll use the `Lee Corpus
# <https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/test/test_data/lee_background.cor>`_
# (which you already have if you've installed gensim).
#
# This corpus is small enough to fit entirely in memory, but we'll implement a
# memory-friendly iterator that reads it line-by-line to demonstrate how you
# would handle a larger corpus.
#
from gensim.test.utils import datapath
from gensim import utils
class MyCorpus(object):
"""An interator that yields sentences (lists of str)."""
def __iter__(self):
corpus_path = datapath('lee_background.cor')
for line in open(corpus_path):
# assume there's one document per line, tokens separated by whitespace
yield utils.simple_preprocess(line)
###############################################################################
# If we wanted to do any custom preprocessing, e.g. decode a non-standard
# encoding, lowercase, remove numbers, extract named entities... All of this can
# be done inside the ``MyCorpus`` iterator and ``word2vec`` doesn’t need to
# know. All that is required is that the input yields one sentence (list of
# utf8 words) after another.
#
# Let's go ahead and train a model on our corpus. Don't worry about the
# training parameters much for now, we'll revisit them later.
#
import gensim.models
sentences = MyCorpus()
model = gensim.models.Word2Vec(sentences=sentences)
###############################################################################
# Once we have our model, we can use it in the same way as in the demo above.
#
# The main part of the model is ``model.wv``\ , where "wv" stands for "word vectors".
#
vec_king = model.wv['king']
###############################################################################
# Retrieving the vocabulary works the same way:
for i, word in enumerate(model.wv.vocab):
if i == 10:
break
print(word)
###############################################################################
# Storing and loading models
# --------------------------
#
# You'll notice that training non-trivial models can take time. Once you've
# trained your model and it works as expected, you can save it to disk. That
# way, you don't have to spend time training it all over again later.
#
# You can store/load models using the standard gensim methods:
#
import tempfile
with tempfile.NamedTemporaryFile(prefix='gensim-model-', delete=False) as tmp:
temporary_filepath = tmp.name
model.save(temporary_filepath)
#
# The model is now safely stored in the filepath.
# You can copy it to other machines, share it with others, etc.
#
# To load a saved model:
#
new_model = gensim.models.Word2Vec.load(temporary_filepath)
###############################################################################
# which uses pickle internally, optionally ``mmap``\ ‘ing the model’s internal
# large NumPy matrices into virtual memory directly from disk files, for
# inter-process memory sharing.
#
# In addition, you can load models created by the original C tool, both using
# its text and binary formats::
#
# model = gensim.models.KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False)
# # using gzipped/bz2 input works too, no need to unzip
# model = gensim.models.KeyedVectors.load_word2vec_format('/tmp/vectors.bin.gz', binary=True)
#
###############################################################################
# Training Parameters
# -------------------
#
# ``Word2Vec`` accepts several parameters that affect both training speed and quality.
#
# min_count
# ---------
#
# ``min_count`` is for pruning the internal dictionary. Words that appear only
# once or twice in a billion-word corpus are probably uninteresting typos and
# garbage. In addition, there’s not enough data to make any meaningful training
# on those words, so it’s best to ignore them:
#
# default value of min_count=5
model = gensim.models.Word2Vec(sentences, min_count=10)
###############################################################################
#
# size
# ----
#
# ``size`` is the number of dimensions (N) of the N-dimensional space that
# gensim Word2Vec maps the words onto.
#
# Bigger size values require more training data, but can lead to better (more
# accurate) models. Reasonable values are in the tens to hundreds.
#
# default value of size=100
model = gensim.models.Word2Vec(sentences, size=200)
###############################################################################
# workers
# -------
#
# ``workers`` , the last of the major parameters (full list `here
# <http://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec>`_)
# is for training parallelization, to speed up training:
#
# default value of workers=3 (tutorial says 1...)
model = gensim.models.Word2Vec(sentences, workers=4)
###############################################################################
# The ``workers`` parameter only has an effect if you have `Cython
# <http://cython.org/>`_ installed. Without Cython, you’ll only be able to use
# one core because of the `GIL
# <https://wiki.python.org/moin/GlobalInterpreterLock>`_ (and ``word2vec``
# training will be `miserably slow
# <http://rare-technologies.com/word2vec-in-python-part-two-optimizing/>`_\ ).
#
###############################################################################
# Memory
# ------
#
# At its core, ``word2vec`` model parameters are stored as matrices (NumPy
# arrays). Each array is **#vocabulary** (controlled by min_count parameter)
# times **#size** (size parameter) of floats (single precision aka 4 bytes).
#
# Three such matrices are held in RAM (work is underway to reduce that number
# to two, or even one). So if your input contains 100,000 unique words, and you
# asked for layer ``size=200``\ , the model will require approx.
# ``100,000*200*4*3 bytes = ~229MB``.
#
# There’s a little extra memory needed for storing the vocabulary tree (100,000 words would take a few megabytes), but unless your words are extremely loooong strings, memory footprint will be dominated by the three matrices above.
#
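# The arithmetic behind the ``~229MB`` figure above, as a quick sanity check:
vocab_size, vector_size, bytes_per_float, num_matrices = 100000, 200, 4, 3
print('approx. %.0f MB' % (vocab_size * vector_size * bytes_per_float * num_matrices / 1024 ** 2))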
###############################################################################
# Evaluating
# ----------
#
# ``Word2Vec`` training is an unsupervised task, there’s no good way to
# objectively evaluate the result. Evaluation depends on your end application.
#
# Google has released their testing set of about 20,000 syntactic and semantic
# test examples, following the “A is to B as C is to D” task. It is provided in
# the 'datasets' folder.
#
# For example, a syntactic analogy of the comparative type is bad:worse;good:?.
# There are a total of 9 types of syntactic comparisons in the dataset, like
# plural nouns and nouns of opposite meaning.
#
# The semantic questions contain five types of semantic analogies, such as
# capital cities (Paris:France;Tokyo:?) or family members
# (brother:sister;dad:?).
#
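# As a concrete (optional) check, the capital-city analogy above can be queried
# directly against the pre-trained vectors from the demo section, assuming ``wv``
# is still loaded:
print(wv.most_similar(positive=['France', 'Tokyo'], negative=['Paris'], topn=3))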
###############################################################################
# Gensim supports the same evaluation set, in exactly the same format:
#
model.accuracy('./datasets/questions-words.txt')
###############################################################################
#
# This ``accuracy`` takes an `optional parameter
# <http://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec.accuracy>`_
# ``restrict_vocab`` which limits which test examples are to be considered.
#
###############################################################################
# In the December 2016 release of Gensim we added a better way to evaluate semantic similarity.
#
# By default it uses an academic dataset WS-353 but one can create a dataset
# specific to your business based on it. It contains word pairs together with
# human-assigned similarity judgments. It measures the relatedness or
# co-occurrence of two words. For example, 'coast' and 'shore' are very similar
# as they appear in the same context. At the same time 'clothes' and 'closet'
# are less similar because they are related but not interchangeable.
#
model.evaluate_word_pairs(datapath('wordsim353.tsv'))
###############################################################################
# .. Important::
# Good performance on Google's or WS-353 test set doesn’t mean word2vec will
# work well in your application, or vice versa. It’s always best to evaluate
# directly on your intended task. For an example of how to use word2vec in a
# classifier pipeline, see this `tutorial
# <https://github.com/RaRe-Technologies/movie-plots-by-genre>`_.
#
###############################################################################
# Online training / Resuming training
# -----------------------------------
#
# Advanced users can load a model and continue training it with more sentences
# and `new vocabulary words <online_w2v_tutorial.ipynb>`_:
#
model = gensim.models.Word2Vec.load(temporary_filepath)
more_sentences = [
['Advanced', 'users', 'can', 'load', 'a', 'model',
'and', 'continue', 'training', 'it', 'with', 'more', 'sentences']
]
model.build_vocab(more_sentences, update=True)
model.train(more_sentences, total_examples=model.corpus_count, epochs=model.iter)
# cleaning up temporary file
import os
os.remove(temporary_filepath)
###############################################################################
# You may need to tweak the ``total_words`` parameter to ``train()``,
# depending on what learning rate decay you want to simulate.
#
# Note that it’s not possible to resume training with models generated by the C
# tool, ``KeyedVectors.load_word2vec_format()``. You can still use them for
# querying/similarity, but information vital for training (the vocab tree) is
# missing there.
#
###############################################################################
# Training Loss Computation
# -------------------------
#
# The parameter ``compute_loss`` can be used to toggle computation of loss
# while training the Word2Vec model. The computed loss is stored in the model
# attribute ``running_training_loss`` and can be retrieved using the function
# ``get_latest_training_loss`` as follows :
#
# instantiating and training the Word2Vec model
model_with_loss = gensim.models.Word2Vec(
sentences,
min_count=1,
compute_loss=True,
hs=0,
sg=1,
seed=42
)
# getting the training loss value
training_loss = model_with_loss.get_latest_training_loss()
print(training_loss)
###############################################################################
# Benchmarks
# ----------
#
# Let's run some benchmarks to see effect of the training loss computation code
# on training time.
#
# We'll use the following data for the benchmarks:
#
# #. Lee Background corpus: included in gensim's test data
# #. Text8 corpus. To demonstrate the effect of corpus size, we'll look at the
# first 1MB, 10MB, 50MB of the corpus, as well as the entire thing.
#
import io
import os
import gensim.models.word2vec
import gensim.downloader as api
import smart_open
def head(path, size):
with smart_open.open(path) as fin:
return io.StringIO(fin.read(size))
def generate_input_data():
lee_path = datapath('lee_background.cor')
ls = gensim.models.word2vec.LineSentence(lee_path)
ls.name = '25kB'
yield ls
text8_path = api.load('text8').fn
labels = ('1MB', '10MB', '50MB', '100MB')
sizes = (1024 ** 2, 10 * 1024 ** 2, 50 * 1024 ** 2, 100 * 1024 ** 2)
for l, s in zip(labels, sizes):
ls = gensim.models.word2vec.LineSentence(head(text8_path, s))
ls.name = l
yield ls
input_data = list(generate_input_data())
###############################################################################
# We now compare the training time taken for different combinations of input
# data and model training parameters like ``hs`` and ``sg``.
#
# For each combination, we repeat the test several times to obtain the mean and
# standard deviation of the test duration.
#
# Temporarily reduce logging verbosity
logging.root.level = logging.ERROR
import time
import numpy as np
import pandas as pd
train_time_values = []
seed_val = 42
sg_values = [0, 1]
hs_values = [0, 1]
fast = True
if fast:
input_data_subset = input_data[:3]
else:
input_data_subset = input_data
for data in input_data_subset:
for sg_val in sg_values:
for hs_val in hs_values:
for loss_flag in [True, False]:
time_taken_list = []
for i in range(3):
start_time = time.time()
w2v_model = gensim.models.Word2Vec(
data,
compute_loss=loss_flag,
sg=sg_val,
hs=hs_val,
seed=seed_val,
)
time_taken_list.append(time.time() - start_time)
time_taken_list = np.array(time_taken_list)
time_mean = np.mean(time_taken_list)
time_std = np.std(time_taken_list)
model_result = {
'train_data': data.name,
'compute_loss': loss_flag,
'sg': sg_val,
'hs': hs_val,
'train_time_mean': time_mean,
'train_time_std': time_std,
}
print("Word2vec model #%i: %s" % (len(train_time_values), model_result))
train_time_values.append(model_result)
train_times_table = | pd.DataFrame(train_time_values) | pandas.DataFrame |
"""Run unit tests.
Use this to run tests and understand how tasks.py works.
Example:
Create directories::
mkdir -p test-data/input
mkdir -p test-data/output
Run tests::
pytest test_combine.py -s
Notes:
* this will create sample csv, xls and xlsx files
    * test_combine_() tests the main combine function
"""
from d6tstack.combine_csv import *
from d6tstack.sniffer import CSVSniffer
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import ntpath
import pytest
cfg_fname_base_in = 'test-data/input/test-data-'
cfg_fname_base_out_dir = 'test-data/output'
cfg_fname_base_out = cfg_fname_base_out_dir+'/test-data-'
cnxn_string = 'sqlite:///test-data/db/{}.db'
#************************************************************
# fixtures
#************************************************************
class TestLogPusher(object):
def __init__(self, event):
pass
def send_log(self, msg, status):
pass
def send(self, data):
pass
logger = TestLogPusher('combiner')
# sample data
def create_files_df_clean():
# create sample data
df1=pd.DataFrame({'date':pd.date_range('1/1/2011', periods=10), 'sales': 100, 'cost':-80, 'profit':20})
df2=pd.DataFrame({'date':pd.date_range('2/1/2011', periods=10), 'sales': 200, 'cost':-90, 'profit':200-90})
df3=pd.DataFrame({'date':pd.date_range('3/1/2011', periods=10), 'sales': 300, 'cost':-100, 'profit':300-100})
# cfg_col = [ 'date', 'sales','cost','profit']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
return df1, df2, df3
def create_files_df_clean_combine():
df1,df2,df3 = create_files_df_clean()
df_all = pd.concat([df1,df2,df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_clean_combine_with_filename(fname_list):
df1, df2, df3 = create_files_df_clean()
df1['filename'] = os.path.basename(fname_list[0])
df2['filename'] = os.path.basename(fname_list[1])
df3['filename'] = os.path.basename(fname_list[2])
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
def create_files_df_colmismatch_combine2(cfg_col_common):
df1, df2, df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
if cfg_col_common:
df_all = pd.concat([df1, df2, df3], join='inner')
else:
df_all = pd.concat([df1, df2, df3])
df_all = df_all[df_all.columns].astype(str)
return df_all
# csv standard
@pytest.fixture(scope="module")
def create_files_csv():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-clean-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch():
df1,df2,df3 = create_files_df_clean()
df3['profit2']=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colmismatch2():
df1,df2,df3 = create_files_df_clean()
for i in range(15):
df3['profit'+str(i)]=df3['profit']*2
# save files
cfg_fname = cfg_fname_base_in+'input-csv-colmismatch2-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False)
df2.to_csv(cfg_fname % 'feb',index=False)
df3.to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_colreorder():
df1,df2,df3 = create_files_df_clean()
cfg_col = [ 'date', 'sales','cost','profit']
cfg_col2 = [ 'date', 'sales','profit','cost']
# return df1[cfg_col], df2[cfg_col], df3[cfg_col]
# save files
cfg_fname = cfg_fname_base_in+'input-csv-reorder-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan',index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb',index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_noheader():
df1,df2,df3 = create_files_df_clean()
# save files
cfg_fname = cfg_fname_base_in+'input-noheader-csv-%s.csv'
df1.to_csv(cfg_fname % 'jan',index=False, header=False)
df2.to_csv(cfg_fname % 'feb',index=False, header=False)
df3.to_csv(cfg_fname % 'mar',index=False, header=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_csv_col_renamed():
df1, df2, df3 = create_files_df_clean()
df3 = df3.rename(columns={'sales':'revenue'})
cfg_col = ['date', 'sales', 'profit', 'cost']
cfg_col2 = ['date', 'revenue', 'profit', 'cost']
cfg_fname = cfg_fname_base_in + 'input-csv-renamed-%s.csv'
df1[cfg_col].to_csv(cfg_fname % 'jan', index=False)
df2[cfg_col].to_csv(cfg_fname % 'feb', index=False)
df3[cfg_col2].to_csv(cfg_fname % 'mar', index=False)
return [cfg_fname % 'jan', cfg_fname % 'feb', cfg_fname % 'mar']
def test_create_files_csv_col_renamed(create_files_csv_col_renamed):
pass
def create_files_csv_dirty(cfg_sep=",", cfg_header=True):
df1,df2,df3 = create_files_df_clean()
df1.to_csv(cfg_fname_base_in+'debug.csv',index=False, sep=cfg_sep, header=cfg_header)
return cfg_fname_base_in+'debug.csv'
# excel single-tab
def create_files_xls_single_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
df1.to_excel(cfg_fname % 'jan',index=False)
df2.to_excel(cfg_fname % 'feb',index=False)
df3.to_excel(cfg_fname % 'mar',index=False)
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_single():
return create_files_xls_single_helper(cfg_fname_base_in+'input-xls-sing-%s.xlsx')
def write_file_xls(dfg, fname, startrow=0,startcol=0):
writer = pd.ExcelWriter(fname)
dfg.to_excel(writer, 'Sheet1', index=False,startrow=startrow,startcol=startcol)
dfg.to_excel(writer, 'Sheet2', index=False,startrow=startrow,startcol=startcol)
writer.save()
# excel multi-tab
def create_files_xls_multiple_helper(cfg_fname):
df1,df2,df3 = create_files_df_clean()
write_file_xls(df1,cfg_fname % 'jan')
write_file_xls(df2,cfg_fname % 'feb')
write_file_xls(df3,cfg_fname % 'mar')
return [cfg_fname % 'jan',cfg_fname % 'feb',cfg_fname % 'mar']
@pytest.fixture(scope="module")
def create_files_xls_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xls')
@pytest.fixture(scope="module")
def create_files_xlsx_multiple():
return create_files_xls_multiple_helper(cfg_fname_base_in+'input-xls-mult-%s.xlsx')
#************************************************************
# tests - helpers
#************************************************************
def test_file_extensions_get():
fname_list = ['a.csv','b.csv']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.csv','.csv']
fname_list = ['a.xls','b.xls']
ext_list = file_extensions_get(fname_list)
assert ext_list==['.xls','.xls']
def test_file_extensions_all_equal():
ext_list = ['.csv']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.xls']*2
assert file_extensions_all_equal(ext_list)
ext_list = ['.csv','.xls']
assert not file_extensions_all_equal(ext_list)
def test_file_extensions_valid():
ext_list = ['.csv']*2
assert file_extensions_valid(ext_list)
ext_list = ['.xls']*2
assert file_extensions_valid(ext_list)
ext_list = ['.exe','.xls']
assert not file_extensions_valid(ext_list)
#************************************************************
#************************************************************
# combine_csv
#************************************************************
#************************************************************
def test_csv_sniff_single(create_files_csv, create_files_csv_noheader):
sniff = CSVSniffer(create_files_csv[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
fname = create_files_csv_dirty("|")
sniff = CSVSniffer(fname)
sniff.get_delim()
assert sniff.delim == "|"
assert sniff.has_header()
df1,df2,df3 = create_files_df_clean()
assert sniff.nrows == df1.shape[0]+1
# no header test
sniff = CSVSniffer(create_files_csv_noheader[0])
sniff.get_delim()
assert sniff.delim == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_csv_sniff_multi(create_files_csv, create_files_csv_noheader):
sniff = CSVSnifferList(create_files_csv)
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert sniff.has_header()
# no header test
sniff = CSVSnifferList(create_files_csv_noheader)
sniff.get_delim()
assert sniff.get_delim() == ','
assert sniff.count_skiprows() == 0
assert not sniff.has_header()
def test_CombinerCSV_columns(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
with pytest.raises(ValueError) as e:
c = CombinerCSV([])
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'sales']
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
# todo: cache the preview dfs somehow? reading the same in next step
assert not col_preview['is_all_equal']
assert not col_preview['columns_all']==col_preview['columns_common']
assert col_preview['columns_all']==['cost', 'date', 'profit', 'profit2', 'sales']
assert col_preview['columns_common']==['cost', 'date', 'profit', 'sales']
assert col_preview['columns_unique']==['profit2']
fname_list = create_files_csv_colreorder
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
col_preview = combiner.preview_columns()
assert not col_preview['is_all_equal']
assert col_preview['columns_all']==col_preview['columns_common']
def test_CombinerCSV_combine(create_files_csv, create_files_csv_colmismatch, create_files_csv_colreorder):
# all columns present
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
df = combiner.combine()
df = df.groupby('filename').head(combiner.nrows_preview)
df_chk = combiner.preview_combine()
assert df.equals(df_chk)
# columns mismatch, all columns
fname_list = create_files_csv_colmismatch
combiner = CombinerCSV(fname_list=fname_list, all_strings=True, add_filename=True)
df = combiner.combine()
df = df.sort_values('date').drop(['filename'],axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=False)
assert df.shape[1] == df_chk.shape[1]
# columns mismatch, common columns
df = combiner.combine(is_col_common=True)
df = df.sort_values('date').drop(['filename'], axis=1)
df_chk = create_files_df_colmismatch_combine(cfg_col_common=True)
assert df.shape[1] == df_chk.shape[1]
# Filename column True
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
df = combiner.combine()
df = df.sort_values('date')
df_chk = create_files_df_clean_combine_with_filename(fname_list)
assert df.equals(df_chk)
# Filename column False
combiner = CombinerCSV(fname_list=fname_list, all_strings=True, add_filename=False)
df = combiner.combine()
df = df.sort_values('date')
df_chk = create_files_df_clean_combine()
assert df.equals(df_chk)
def test_CombinerCSV_combine_advanced(create_files_csv):
# Check if rename worked correctly.
fname_list = create_files_csv
combiner = CombinerCSV(fname_list=fname_list, all_strings=True)
adv_combiner = CombinerCSV(fname_list=fname_list, all_strings=True,
columns_select=None, columns_rename={'date':'date1'})
df = adv_combiner.combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
df = adv_combiner.preview_combine()
assert 'date1' in df.columns.values
assert 'date' not in df.columns.values
adv_combiner = CombinerCSV(fname_list=fname_list, all_strings=True,
columns_select=['cost', 'date', 'profit', 'profit2', 'sales'])
df = adv_combiner.combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
df = adv_combiner.preview_combine()
assert 'profit2' in df.columns.values
assert df['profit2'].isnull().all()
def test_preview_dict():
df = pd.DataFrame({'col1':[0,1],'col2':[0,1]})
assert preview_dict(df) == {'columns': ['col1', 'col2'], 'rows': {0: [[0]], 1: [[1]]}}
#************************************************************
# tests - CombinerCSV rename and select columns
#************************************************************
def create_df_rename():
df11 = pd.DataFrame({'a':range(10)})
df12 = pd.DataFrame({'b': range(10)})
df21 = pd.DataFrame({'a':range(10),'c': range(10)})
df22 = pd.DataFrame({'b': range(10),'c': range(10)})
return df11, df12, df21, df22
# csv standard
@pytest.fixture(scope="module")
def create_files_csv_rename():
df11, df12, df21, df22 = create_df_rename()
# save files
cfg_fname = cfg_fname_base_in+'input-csv-rename-%s.csv'
df11.to_csv(cfg_fname % '11',index=False)
df12.to_csv(cfg_fname % '12',index=False)
df21.to_csv(cfg_fname % '21',index=False)
df22.to_csv(cfg_fname % '22',index=False)
return [cfg_fname % '11',cfg_fname % '12',cfg_fname % '21',cfg_fname % '22']
def test_create_files_csv_rename(create_files_csv_rename):
pass
@pytest.fixture(scope="module")
def create_out_files_csv_align_save():
cfg_outname = cfg_fname_base_out + 'input-csv-rename-%s-align-save.csv'
return [cfg_outname % '11', cfg_outname % '12',cfg_outname % '21',cfg_outname % '22']
@pytest.fixture(scope="module")
def create_out_files_parquet_align_save():
cfg_outname = cfg_fname_base_out + 'input-csv-rename-%s-align-save.parquet'
return [cfg_outname % '11', cfg_outname % '12',cfg_outname % '21',cfg_outname % '22']
def test_apply_select_rename():
df11, df12, df21, df22 = create_df_rename()
# rename 1, select all
assert df11.equals(apply_select_rename(df12.copy(),[],{'b':'a'}))
# rename and select 1
assert df11.equals(apply_select_rename(df22.copy(),['b'],{'b':'a'}))
assert df11.equals(apply_select_rename(df22.copy(),['a'],{'b':'a'}))
# rename and select 2
assert df21[list(dict.fromkeys(df21.columns))].equals(apply_select_rename(df22.copy(),['b','c'],{'b':'a'}))
assert df21[list(dict.fromkeys(df21.columns))].equals(apply_select_rename(df22.copy(),['a','c'],{'b':'a'}))
with pytest.warns(UserWarning, match="Renaming conflict"):
assert df22.equals(apply_select_rename(df22.copy(), ['b', 'c'], {'c': 'b'}))
def test_CombinerCSV_rename(create_files_csv_rename):
df11, df12, df21, df22 = create_df_rename()
df_chk1 = pd.concat([df11,df11])
df_chk2 = pd.concat([df11,df21])
def helper(fnames, cfg_col_sel,cfg_col_rename, df_chk, chk_filename=False, is_filename_col=True):
if cfg_col_sel and cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col,
columns_select=cfg_col_sel, columns_rename=cfg_col_rename)
elif cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_rename=cfg_col_rename)
else:
c2 = CombinerCSV(fnames, add_filename=is_filename_col)
dfc = c2.combine()
if (not chk_filename) and is_filename_col:
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk)
if cfg_col_sel:
fname_out = cfg_fname_base_out_dir + '/test_save.csv'
c2.combine_save(fname_out)
dfc = pd.read_csv(fname_out)
if (not chk_filename) or is_filename_col:
dfc = dfc.drop(['filename'], 1)
assert dfc.equals(df_chk.reset_index(drop=True))
# rename 1, select all
l = create_files_csv_rename[:2]
helper(l,None,{'b':'a'},df_chk1)
with pytest.raises(ValueError) as e:
c2 = CombinerCSV(l, columns_select=['a','a'])
# rename 1, select some
l = [create_files_csv_rename[0],create_files_csv_rename[-1]]
helper(l,['a'],{'b':'a'},df_chk1)
helper(l,['b'],{'b':'a'},df_chk1)
helper(l,None,{'b':'a'},df_chk2)
l = [create_files_csv_rename[1],create_files_csv_rename[-1]]
helper(l,['a'],{'b':'a'},df_chk1)
helper(l,['b'],{'b':'a'},df_chk1)
helper(l,None,{'b':'a'},df_chk2)
with pytest.warns(UserWarning, match="Renaming conflict"):
c2 = CombinerCSV(l, columns_rename={'b': 'a', 'c': 'a'})
c2.combine()
# rename none, select all
l = [create_files_csv_rename[0],create_files_csv_rename[2]]
helper(l,None,None,df_chk2)
# filename col True
df31 = df11
df32 = df21
df31['filename'] = os.path.basename(l[0])
df32['filename'] = os.path.basename(l[1])
df_chk3 = pd.concat([df31, df32])
helper(l, None, None, df_chk3, is_filename_col=True, chk_filename=True)
helper(l, None, None, df_chk2, is_filename_col=False, chk_filename=True)
def test_CombinerCSV_align_save_advanced(create_files_csv_rename, create_out_files_csv_align_save):
df11, df12, df21, df22 = create_df_rename()
def helper(fnames, cfg_col_sel, cfg_col_rename, new_fnames, df_chks, is_filename_col=False):
if cfg_col_sel and cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col,
columns_select=cfg_col_sel, columns_rename=cfg_col_rename)
elif cfg_col_sel:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_select=cfg_col_sel)
elif cfg_col_rename:
c2 = CombinerCSV(fnames, add_filename=is_filename_col, columns_rename=cfg_col_rename)
else:
c2 = CombinerCSV(fnames, add_filename=is_filename_col)
c2.align_save(output_dir=cfg_fname_base_out_dir, prefix="-align-save")
for fname_out, df_chk in zip(new_fnames, df_chks):
dfc = | pd.read_csv(fname_out) | pandas.read_csv |
# ====================================================
# Inference main routine
# ====================================================
"""
import sys
package_path = '../input/pytorch-image-models/pytorch-image-models-master' #'../input/efficientnet-pytorch-07/efficientnet_pytorch-0.7.0'
sys.path.append(package_path)
sys.path.append("../input/cassava-script")
"""
from src.utils import set_seed
from src.data_set import prepare_dataloader, TestDataset
from src.model.train_model import CassvaImgClassifier
from src.learning import train_one_epoch, valid_one_epoch
from sklearn.model_selection import GroupKFold, StratifiedKFold
import torch
from torch import nn
import os
import torch.nn.functional as F
import sklearn
import warnings
import joblib
from sklearn.metrics import roc_auc_score, log_loss
from sklearn import metrics
import pandas as pd
import numpy as np
from torch.cuda.amp import autocast, GradScaler
from albumentations import (
HorizontalFlip, VerticalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine, RandomResizedCrop,
IAASharpen, IAAEmboss, RandomBrightnessContrast, Flip, OneOf, Compose, Normalize, Cutout, CoarseDropout, ShiftScaleRotate, CenterCrop, Resize
)
from albumentations.pytorch import ToTensorV2
from tqdm import tqdm
# Settings
CFG = {
'fold_num': 5,
'seed': 42,
'model_arch': 'tf_efficientnet_b4_ns',
'img_size': 512,
'epochs': 10,
'train_bs': 16,
'valid_bs': 32,
'T_0': 10,
'lr': 1e-4,
'min_lr': 1e-6,
'weight_decay':1e-6,
#'num_workers': 4,
    'num_workers': 0,  # setting for running on a local PC
    'accum_iter': 2,  # support batch accumulation for backprop with an effectively larger batch size
'verbose_step': 1,
#'device': 'cuda:0'
    'device': 'cpu',  # setting when running on a local PC
    'tta': 4,  # for inference: number of test-time augmentation passes
    'used_epochs': [4, 5, 6],  # for inference: which epochs (checkpoints) to use
    'weights': [1, 1, 1],  # for inference: blending ratios for the used epochs
}
def get_inference_transforms():
return Compose([
RandomResizedCrop(CFG['img_size'], CFG['img_size']),
Transpose(p=0.5),
HorizontalFlip(p=0.5),
VerticalFlip(p=0.5),
HueSaturationValue(hue_shift_limit=0.2, sat_shift_limit=0.2, val_shift_limit=0.2, p=0.5),
RandomBrightnessContrast(brightness_limit=(-0.1,0.1), contrast_limit=(-0.1, 0.1), p=0.5),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=255.0, p=1.0),
ToTensorV2(p=1.0),
], p=1.)
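# A rough sketch (assumption, not spelled out in this script) of how the
# inference-related CFG entries above are typically consumed: for each epoch in
# CFG['used_epochs'], load that checkpoint, run CFG['tta'] augmented forward
# passes with get_inference_transforms(), average the softmax outputs, and then
# blend the per-epoch predictions using CFG['weights'] (normalised by their sum).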
if __name__ == '__main__':
# for training only, need nightly build pytorch
    # Intent: check the validation folds that were used during training
set_seed(CFG['seed'])
    # Load the training data
if(CFG["debug"] == True):
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv' , nrows = 50)
else:
train = pd.read_csv('../input/cassava-leaf-disease-classification/train.csv')
print(train)
folds = StratifiedKFold(n_splits=CFG['fold_num']).split(np.arange(train.shape[0]), train.label.values)
for fold, (trn_idx, val_idx) in enumerate(folds):
# we'll train fold 0 first
if fold > 0:
break
print('Inference fold {} started'.format(fold))
        # Create the validation dataset
valid_ = train.loc[val_idx,:].reset_index(drop=True)
#valid_ds = CassavaDataset(valid_, '../input/cassava-leaf-disease-classification/train_images/', transforms=get_inference_transforms(), output_label=False)
#__init__(self, df, data_root, transform=None):
valid_ds = TestDataset(valid_, '../input/cassava-leaf-disease-classification/train_images/', transform=get_inference_transforms())
test = | pd.DataFrame() | pandas.DataFrame |
# Functions for comparing VCF variant calls files
import os
import vcf
import pandas as pd
import pdb
def read_true(refdir, NSC):
"""Reads BED files containing true SNV locations and returns DataFrame
with columns {sample, loc, snv}."""
true_df = pd.DataFrame(columns=['sample', 'loc', 'snv'])
for i in range(1, NSC + 1):
filepath = os.path.join(refdir, 'snv_sc%d.bed' % i)
ds = pd.read_csv(filepath, sep='\t').iloc[:,2]
df = pd.DataFrame({'sample': 'after_wga_sc%d' % i, 'loc': ds, 'snv': True})
true_df = true_df.append(df, ignore_index=True, sort = True)
return true_df
def read_monovar(callfile='calls/monovar/all.vcf'):
"""Read VCF file containing called SNV locations from Monovar and returns
DataFrame with columns {sample, loc, monovar}."""
monovar_df = | pd.DataFrame(columns=['sample', 'loc', 'monovar']) | pandas.DataFrame |
'''
Evaluates an ensemble model composed of aggregated single-feature models on test data and produces ROC and Precision-Recall graphs. Loops through a set of features, using a single feature at a time, and combines the scores into an aggregated score either by 1) taking the mean (Mean Ensemble), or 2) taking a weighted mean (Weighted Ensemble). The weighted ensemble requires a priori knowledge of the feature weights.
'''
# --- Imports ---
import pandas as pd
import numpy as np
import os, sys
import time
from sklearn.metrics import precision_score, recall_score, precision_recall_curve, roc_curve
import statistics
# add the parent directory to the path
sys.path.insert(0, os.path.abspath("../../"))
sys.path.insert(0, os.path.abspath("../"))
sys.path.insert(0, os.path.abspath("../ranking/"))
from common import *
from constants_model import *
from model import *
# ---------------
def print_statistics(scores_dict, ports):
for port in ports:
print("\n\nport:", port)
res = [list(x) for x in zip(*scores_dict[port].values())]
means = [statistics.mean(x) for x in res]
print("\n\nmean:", means)
# stds = [statistics.pstdev(x) for x in res]
stds = [statistics.stdev(x) for x in res]
print("\n\nstd:", stds)
def combine_scores_by_mean(scores_dict):
res = [list(x) for x in zip(*scores_dict.values())]
# print("res, len:", len(res), res)
means = [statistics.mean(x) for x in res]
# print("\n\nmean:", means)
return means
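# Illustrative usage (hypothetical scores): with two single-feature models scoring
# the same two windows, the ensemble score is the element-wise mean, e.g.
#   combine_scores_by_mean({'f1': [0.2, 0.4], 'f2': [0.6, 0.8]})  # -> [0.4, 0.6]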
def read_feature_importance(port, feature_imp_dir):
filename = os.path.join(feature_imp_dir, "port{}.csv".format(port))
    data = pd.read_csv(filename, header=None, sep=r'\s+', index_col=0)
print(data)
return data
def apply_weights(feature_importance, scores_dict, feature_cols):
fsum = 0
sum_weights = 0
for f in feature_cols:
first_column = feature_importance.iloc[:, 0]
if f not in first_column:
continue
index = feature_cols.index(f)
feat_imp_f = feature_importance.loc[f, 1]
fsum += feat_imp_f * scores_dict[index]
sum_weights += feat_imp_f
print("sum_weights, fsum:", sum_weights, fsum)
return fsum / sum_weights
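# Illustrative example (hypothetical weights): with feature importances
# {f1: 2.0, f2: 1.0} and per-window scores [0.9, 0.3] ordered as in feature_cols,
# the weighted ensemble score is (2.0*0.9 + 1.0*0.3) / (2.0 + 1.0) = 0.7.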
def combine_scores_by_imp(scores_dict, port, feature_imp_dir, feature_cols):
print("combining scores: ", port, feature_imp_dir)
feature_imp = read_feature_importance(port, feature_imp_dir)
res = [list(x) for x in zip(*scores_dict.values())]
# print("res, len:", len(res), res)
weighted_scores = [apply_weights(feature_imp, x, feature_cols) for x in res]
print("\n\nweighted_scores:", weighted_scores)
return weighted_scores
def get_combined_scores_per_port(port, feature_cols, t_file, model_dir=None, feature_imp_dir=None, weighted=True, labeled=True, ranking=False, port_feat_imp=None):
scores_dict = dict()
Y = []
for feat in feature_cols:
# print("Testing on feature: ", feat)
# read the model
model_file_name = os.path.join(model_dir, "{}_{}_model_{}.obj".format(feat, TRAIN_DAYS, MODEL))
models = get_model(model_file_name, None, [feat])
if port not in models:
return [], []
scores_dict[feat] = dict()
scaler, model = models[port]
test_data = pd.read_csv(t_file)
test_data = test_data[test_data['port'] == port]
if len(test_data) == 0:
return [], []
if labeled:
Y = test_data['label'].values
else:
Y = [False for i in range(len(test_data))]
# print("Y: ", Y)
# compute model scores for the feature columns of interest
X = test_data.filter([feat]).values
# print("X: ", X)
# print("Testing: port, min, max, mean, std: ", port, X.min(), X.max(), X.mean(), X.std())
if SCALAR_BOUNDED:
X = bound_test_data(scaler, X, [feat])
X = scaler.transform(X)
if MODEL == "isolation":
scores = model.score_samples(X)
else:
scores = np.exp(model.score_samples(X))
scores_dict[feat] = scores
print("\n\nport, scores, len, type: ", port, len(scores), type(scores), scores)
# print_statistics(scores_dict, PORTS)
if weighted:
scores_combined = np.asarray(combine_scores_by_imp(scores_dict, port_feat_imp, feature_imp_dir, feature_cols))
print("\n\nScores combined by feature imp: ", len(scores_combined), type(scores_combined), scores_combined)
else:
scores_combined = np.asarray(combine_scores_by_mean(scores_dict))
print("\n\nScores combined by mean: ", len(scores_combined), type(scores_combined), scores_combined)
return scores_combined, Y
def normalize_top_scores(scores_topk, port, feature_cols, t_file, model_dir=None, feature_imp_dir=None, weighted=True, port_feat_imp=None):
scores_dict = dict()
scores_final = []
# print("scores_topk: ", scores_topk)
windows_topk = [elem[0] for elem in scores_topk]
print("windows_topk: ", windows_topk)
scores_only_topk = [elem[1] for elem in scores_topk]
print("scores_only_topk: ", scores_only_topk)
for feat in feature_cols:
# print("Testing on feature: ", feat)
# read the model
        model_file_name = os.path.join(model_dir, "{}_{}_model_{}.obj".format(feat, TRAIN_DAYS, MODEL))
models = get_model(model_file_name, None, [feat])
scores_dict[feat] = dict()
scaler, model = models[port]
test_data = pd.read_csv(t_file)
test_data = test_data[test_data['port'] == port]
test_data = | pd.DataFrame({'timewindow':windows_topk}) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@Author <NAME>
Produces a final derived file containing all information required for data preparation
"""
import pandas as pd
import numpy as np
import math
import sys
# Monthly data
data_df=pd.read_csv('../../../collection/python/output/string_theory_indicators_monthly.csv')
#print(data_df)
data_df.rename(columns={'month':'date'},inplace=True)
data_df['date']=pd.to_datetime(data_df['DATE'])
data_df['month'] = data_df['date'].apply(lambda x: x.month)
data_df['year'] = data_df['date'].apply(lambda x: x.year)
# Daily data
data_daily=pd.read_csv('../../../derivation/python/output/daily_policy_data.csv')
data_daily['date']=pd.to_datetime(data_daily["DATE"])
data_daily['year']=data_daily["date"].apply(lambda x:x.year)
data_daily['month']=data_daily["date"].apply(lambda x:x.month)
#data_daily=data_daily[(data_daily['year']>=1987) & (data_daily['year']<2009)]
data_rates=data_daily[['TRY_3M', 'TRY-2Y', 'TRY-10Y','DFEDTAR','year', 'month']]
for var in ['TRY_3M', 'TRY-2Y', 'TRY-10Y']:
data_rates.loc[data_rates[var]=="ND",var]=np.nan
data_rates[['TRY_3M', 'TRY-2Y', 'TRY-10Y']]=data_rates[['TRY_3M', 'TRY-2Y', 'TRY-10Y']].astype(float)
data_rates=data_rates.groupby(['year', 'month']).mean()
data_rates=data_rates.rename(columns={"TRY-2Y":"TRY_2Y","TRY-10Y":"TRY_10Y","DFEDTAR":"FF_TAR"})
data_daily=data_daily[['DFEDTAR', 'end_date', 'event_type', 'date', 'year', 'month']]
# Make pre 1994 adjustment
data_daily.loc[data_daily['year']<1994,'event_type']=data_daily[data_daily['year']<1994]['event_type'].shift(periods=1)
data_daily=data_daily[data_daily['date'].notna()]
data_daily['d_meeting']=data_daily["event_type"]=="Meeting"
from calendar import monthrange
data_daily['days_month']=data_daily["date"].apply(lambda x:monthrange(x.year, x.month)[1])
data_daily['day']=data_daily['date'].apply(lambda x:x.day)
# Scale variable
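# For meeting days, scale = (days in month) / (days from the meeting day through month end, inclusive)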
data_daily['scale']=data_daily[data_daily["event_type"]=="Meeting"]['days_month'] / ( data_daily[data_daily["event_type"]=="Meeting"]['days_month'] - data_daily[data_daily["event_type"]=="Meeting"]['day'] + 1 )
data_scale=data_daily[['scale', 'year', 'month']].groupby(['year', 'month']).mean()
# Crisis dummy
data_daily['d_crisis']=data_daily["date"]>"2006-08-01"
data_crisis=data_daily[['d_crisis', 'year', 'month']].groupby(['year', 'month']).mean()
# Monthly target changes
data_meeting=data_daily[['d_meeting', 'year', 'month']].groupby(['year', 'month']).sum()
data_daily=data_daily[data_daily['day']==data_daily['days_month']]
data_daily.loc[:,'lag_DFEDTAR'] = data_daily['DFEDTAR'].shift(periods=1)
data_daily['ch_DFEDTAR']=data_daily['DFEDTAR']-data_daily['lag_DFEDTAR']
data_pchanges = data_daily[[ 'year', 'month','ch_DFEDTAR','lag_DFEDTAR','DFEDTAR']]
data_pchanges = data_pchanges.dropna()
# Define meeting / non-meeting months
clean_data=data_df[['date','month','year','INDPRO_PC1','PCEPI','PCEPI_PCA']]
clean_data=clean_data.sort_values(by=['year', 'month'])
clean_data.loc[:,'inflation']=data_df['PCEPI_PCH']
#clean_data.loc[:,'l1_inflation']=data_df['PCEPI_PCH'].shift(periods=1)
#clean_data.loc[:,'l2m_prices']=data_df['PCEPI'].shift(periods=2)
#clean_data['lagged_infl']=(np.log(clean_data['lagged_prices'])-np.log(clean_data['l2m_prices']))*100
clean_data.loc[:,'unemp']=data_df['UNRATE']
clean_data.loc[:,'lagged_unemp']=data_df['UNRATE'].shift(periods=1)
#clean_data.drop(columns=['lagged_prices', 'l2m_prices'],inplace=True)
#
market_df=pd.read_csv('../../../derivation/python/output/derived_monthly_market_exp.csv')
clean_data=clean_data.merge(market_df[['year','month','market_exp']],how='outer',on=['year','month'])
clean_data=clean_data.merge(data_pchanges,how='outer',on=['year','month'])
clean_data=clean_data.merge(data_meeting,how='outer',on=['year','month'])
clean_data=clean_data.merge(data_scale,how='outer',on=['year','month'])
clean_data=clean_data.merge(data_crisis,how='outer',on=['year','month'])
clean_data=clean_data.merge(data_rates,how='outer',on=['year','month'])
# Merge the policy menu
menu_df = | pd.read_csv('../../../analysis/python/output/monthly_treatment_counts.csv') | pandas.read_csv |
from askdata import nlg
import pandas as pd
if __name__ == "__main__":
df = | pd.DataFrame() | pandas.DataFrame |
"""\U0001F525\U0001FAB0 Luciferase conversion script.
The luciferase machine outputs a txt file that usually has to be converted to useful output manually.
This script simplifies the entire process by combining a "format.csv" file containing information on which well (96-well plate) has which sample and the "output.txt" file from the luciferase machine.
This is the proposed workflow:
* Create the format.csv file using `python luciferase.py --create_format`
* Use your editor of choice (excel, numbers) to fill in the rows and columns matching with your samples
* Save the output as .csv file (excel / numbers will suggest you to use their own formats)
* Merge the output.txt and format.csv file using `python luciferase.py -v PATH/TO/output.txt -f PATH/TO/format.csv`
"""
import argparse
import os
import re
import string
import pandas as pd
def create_format(name: str = "format.csv") -> None:
"""Create format.csv with rows and columns matching a 96-well plate."""
columns = list(range(1, 13))
index = re.findall("[A-H]", string.printable)
df = pd.DataFrame(index=index, columns=columns)
df.at["A", 1] = "Sample Name in A1"
df.to_csv(name)
print(f"Empty format file saved to {name}.")
def parse_files(file_values: str, file_format: str) -> None:
    """Parse the machine's output txt file and the user-provided format csv file."""
if (not os.path.isfile(file_values)) or (not file_values.lower().endswith(".txt")):
raise ValueError(
f"Make sure the value.txt file exists and is txt. {file_values} does not!"
)
if (not os.path.isfile(file_format)) or (not file_format.lower().endswith(".csv")):
raise ValueError(
f"Make sure the format.csv file exists and is csv. {file_format} does not!"
)
# Create output file path
file_output = f"{os.path.splitext(file_values)[0]}.csv"
if os.path.isfile(file_output):
raise ValueError(
"The output.csv file already exists. "
f"I don't want to overwrite {file_output}!"
)
# Open txt value file
with open(file_values, "r") as f:
data = f.readlines()
# Open csv format file
df = | pd.read_csv(file_format, header=0, index_col=0) | pandas.read_csv |
from sodapy import Socrata
import geopandas
import pandas as pd
from dateutil.relativedelta import relativedelta
from datetime import timedelta, date
import numpy as np
from flask import Flask, send_from_directory
import csv
import json
#Directory for data files
ASSET_DIR = './Asset'
app = Flask(__name__, static_url_path='', static_folder='D3_Visualization')
with open(ASSET_DIR + '/wards.geojson', 'r') as f:
wardsDict = json.load(f)
#Home endpoint
@app.route('/')
def home():
return app.send_static_file('index.html')
#Border of wards endpoint
@app.route('/wards')
def getWards():
return wardsDict
#Crime endpoint
@app.route('/crimes')
def getCrimes():
    crimes = getUpdatedCrimeData()
    # print(crimes)
    return crimes
def getUpdatedCrimeData():
# Unauthenticated client only works with public data sets. Note 'None'
# in place of application token, and no username or password:
client = Socrata("data.cityofchicago.org", None)
# Example authenticated client (needed for non-public datasets):
# client = Socrata(data.cityofchicago.org,
# MyAppToken,
    # username="<EMAIL>",
# password="<PASSWORD>")
    # First 70000 results, returned as JSON from API / converted to Python list of
# dictionaries by sodapy.
results = client.get("ijzp-q8t2", order="date DESC", limit=70000)
# Convert to pandas DataFrame
results_df = | pd.DataFrame.from_records(results) | pandas.DataFrame.from_records |
import unittest
import os
import tempfile
from collections import namedtuple
from blotter import blotter
from pandas.util.testing import assert_frame_equal, assert_series_equal, \
assert_dict_equal
import pandas as pd
import numpy as np
class TestBlotter(unittest.TestCase):
def setUp(self):
cdir = os.path.dirname(__file__)
self.prices = os.path.join(cdir, 'data/prices')
self.rates = os.path.join(cdir, 'data/rates/daily_interest_rates.csv')
self.log = os.path.join(cdir, 'data/events.log')
self.meta_log = os.path.join(cdir, 'data/meta_data.log')
def tearDown(self):
pass
def assertEventsEqual(self, evs1, evs2):
if len(evs1) != len(evs2):
raise(ValueError("Event lists length mismatch"))
for ev1, ev2 in zip(evs1, evs2):
self.assertEqual(ev1.type, ev2.type)
assert_dict_equal(ev1.data, ev2.data)
def assertEventTypes(self, evs1, evs2):
msg = "Event lists length mismatch\n\nLeft:\n%s \nRight:\n%s"
left_msg = ""
for ev in evs1:
left_msg += str(ev) + "\n"
right_msg = ""
for ev in evs2:
right_msg += ev.type + "\n"
msg = msg % (left_msg, right_msg)
if len(evs1) != len(evs2):
raise(ValueError(msg))
for ev1, ev2 in zip(evs1, evs2):
if ev1.type is not ev2.type:
raise(ValueError(msg))
def assertDictDataFrameEqual(self, dict1, dict2):
self.assertEqual(dict1.keys(), dict2.keys())
for key in dict1.keys():
try:
assert_frame_equal(dict1[key], dict2[key])
except AssertionError as e:
e.args = (("\nfor key %s\n" % key) + e.args[0],)
raise e
def make_blotter(self):
blt = blotter.Blotter(self.prices, self.rates)
return blt
def test_get_actions(self):
actions = [(pd.Timedelta("16h"), "PNL"),
(pd.Timedelta("16h"), "INTEREST")]
old_ts = pd.Timestamp("2017-01-04T10:30")
new_ts = pd.Timestamp("2017-01-06T10:30")
ac_ts = blotter.Blotter._get_actions(old_ts, new_ts, actions)
idx = pd.DatetimeIndex([pd.Timestamp("2017-01-04T16:00"),
| pd.Timestamp("2017-01-04T16:00") | pandas.Timestamp |
import json
import pandas as pd
import pytest
from transformers import AutoTokenizer, T5ForConditionalGeneration, DataCollatorForSeq2Seq, T5Config
from datasets import load_dataset
from pathlib import Path
from omegaconf import OmegaConf
from src.common.util import PROJECT_ROOT
import shutil
from promptsource.templates import DatasetTemplates
from functools import partial
import yaml
from src import tracking
from src.common import sanitize_name
def test_get_metrics_for_wandb(tmpdir):
pred_path = Path(tmpdir).joinpath("preds.jsonl")
met_path = Path(tmpdir).joinpath("metrics.json")
with met_path.open("w", encoding='utf-8') as f:
json.dump({"accuracy": 100}, f)
choices = ["Yes", "Maybe", "No"]
choice_logits = [
{
"choice_logits": {"0": -32.75, "1": -89.0, "2": -55.22}
},
{
"choice_logits": {"0": -32.92, "1": -88.09, "2": -55.96}
}
]
data = [{
"id" : 0, "prediction": "Yes", "target": "Yes",
"input": "Suppose The Parma trolleybus system (Italian: \"Rete filoviaria di Parma\" ) forms part of the public transport network of the city and \"comune\" of Parma, in the region of Emilia-Romagna, northern Italy. In operation since 1953, the system presently comprises four urban routes. Can we infer that \"The trolleybus system has over 2 urban routes\"? Yes, no, or maybe?"
}, {
"id" : 1, "prediction": "Yes", "target": "Maybe",
"input": "Suppose <NAME> (9 March 1946 \u2013 12 January 2014) was a British actress, best known for her role as secret agent <NAME> in the 1968 British espionage/science fiction adventure series \"The Champions\". She has been cited as a sex symbol of the 1960s and 1970s. Bastedo was a vegetarian and animal welfare advocate. Can we infer that \"<NAME> was a popular character through the 1980\'s.\"? Yes, no, or maybe?"
}]
expected_data = []
with pred_path.open('w', encoding='utf-8') as f:
for line, choice_info in zip(data, choice_logits):
f.write(json.dumps({**line, **choice_info}) + "\n")
line_record = line
for choice, (choice_id, logit) in zip(choices,
choice_info['choice_logits'].items()):
line_record[f"choice_{choice_id}"] = choice
line_record[f"choice_{choice_id}_logit"] = logit
line_record['correct'] = line_record['prediction'] == line_record['target']
expected_data.append(line_record)
metrics, result = tracking.get_metrics_for_wandb(met_path, pred_path, choices=choices)
assert set(metrics) == {
"accuracy"
}
assert metrics['accuracy'] == 100
expected = pd.DataFrame.from_records(expected_data)
result = result.reindex(sorted(result.columns), axis=1)
expected = expected.reindex(sorted(expected.columns), axis=1).sort_values(by=['id'])
| pd._testing.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
'''
Useful functions. Contains functions to load data as well as metrics on data processing.
'''
import time
import pandas as pd
class TimeError(Exception):
"""Custom Timer Exception"""
class MeasureError(Exception):
"""Custom measure exception"""
class Timer:
'''
Custom Class to time how long scripts take to run.
Usage:
from functions.utils import Timer
timer = Timer()
timer.start()
<code here>
timer.stop()
'''
def __init__(self):
self.__start = None
def start(self):
if self.__start is not None:
raise TimeError("Timer is already started")
self.__start = time.perf_counter()
def stop(self):
if self.__start is None:
raise TimeError("Timer has not been started. Use .start() to start timer")
time_taken = time.perf_counter() - self.__start
self.__start = None
if time_taken > 60:
time_taken = time_taken / 60
print(f"Finished in {time_taken} mins")
else:
print(f"Finished in {time_taken} seconds")
def load_data(measure, data):
'''
Function to load data.
Parameters
-------------
measure: str, which cortical measure to load.
options are volume, area and lgi
data: str, absolute path to data
Returns
-------------
results: dict of aan, wr, hc pd.dataframes,
names of brain regions, list and
centroids (co-ordinates) numpy array
'''
measure = measure.lower()
if measure == 'lgi':
try:
lh_measure = pd.read_csv(f'{data}/lh_lgi.dat', sep='\t').drop([
'BrainSegVolNotVent', 'eTIV'], axis=1).rename(columns={'lh.aparc.pial_lgi.thickness':'G-Number'})
rh_measure = pd.read_csv(f'{data}/rh_lgi.dat', sep='\t').drop([
'BrainSegVolNotVent', 'eTIV','rh.aparc.pial_lgi.thickness'], axis=1)
        except Exception as e:
            print('Unable to load files', e)
elif measure == 'area':
try:
lh_measure = pd.read_csv(f'{data}/lh_area.dat',sep='\t').drop(['lh_WhiteSurfArea_area',
'BrainSegVolNotVent', 'eTIV'],axis=1).rename(columns={'lh.aparc.area':'G-Number'})
rh_measure = pd.read_csv(f'{data}/rh_area.dat',sep='\t').drop(['rh_WhiteSurfArea_area',
'BrainSegVolNotVent', 'eTIV','rh.aparc.area'],axis=1)
except Exception as e:
print('Unable to load files', e)
elif measure == 'volume':
try:
lh_measure = pd.read_csv(f'{data}/lh_volume.dat',sep='\t').drop([
'BrainSegVolNotVent', 'eTIV'],axis=1).rename(columns={'lh.aparc.volume':'G-Number'})
rh_measure = pd.read_csv(f'{data}/rh_volume.dat',sep='\t').drop([
'BrainSegVolNotVent', 'eTIV','rh.aparc.volume'],axis=1)
except Exception as e:
print('Unable to load files', e)
else:
        raise MeasureError('Unknown Measure. Please use LGI, area or volume (case insensitive). Use -h for further help')
group = | pd.read_csv(f'{data}/cortical_measures.csv') | pandas.read_csv |
import pandas as pd
import pytest
from kartothek.io.dask.dataframe import collect_dataset_metadata
from kartothek.io.eager import (
store_dataframes_as_dataset,
update_dataset_from_dataframes,
)
from kartothek.io_components.metapartition import _METADATA_SCHEMA, MetaPartition
from kartothek.io_components.write import store_dataset_from_partitions
from kartothek.serialization import ParquetSerializer
def test_collect_dataset_metadata(store_session_factory, dataset):
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=None,
frac=1,
).compute()
actual = df_stats.drop(
columns=[
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
actual.sort_values(by=["partition_label", "row_group_id"], inplace=True)
expected = pd.DataFrame(
data={
"partition_label": ["cluster_1", "cluster_2"],
"row_group_id": [0, 0],
"number_rows_total": [1, 1],
"number_row_groups": [1, 1],
"number_rows_per_row_group": [1, 1],
},
index=[0, 0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates(store_session_factory, dataset):
predicates = [[("P", "==", 1)]]
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
actual = df_stats.drop(
columns=[
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
actual.sort_values(by=["partition_label", "row_group_id"], inplace=True)
# Predicates are only evaluated on index level and have therefore no effect on this dataset
expected = pd.DataFrame(
data={
"partition_label": ["cluster_1", "cluster_2"],
"row_group_id": [0, 0],
"number_rows_total": [1, 1],
"number_row_groups": [1, 1],
"number_rows_per_row_group": [1, 1],
},
index=[0, 0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates_on_index(store_factory):
df = pd.DataFrame(
data={"P": range(10), "L": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"]}
)
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", partition_on=["L"], dfs=[df],
)
predicates = [[("L", "==", "b")]]
df_stats = collect_dataset_metadata(
store=store_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
assert "L=b" in df_stats["partition_label"].values[0]
df_stats.sort_values(by=["partition_label", "row_group_id"], inplace=True)
actual = df_stats.drop(
columns=[
"partition_label",
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
expected = pd.DataFrame(
data={
"row_group_id": [0],
"number_rows_total": [5],
"number_row_groups": [1],
"number_rows_per_row_group": [5],
},
index=[0],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_predicates_row_group_size(store_factory):
ps = ParquetSerializer(chunk_size=2)
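    # chunk_size=2 caps each Parquet row group at two rows, so the five rows with L == "a" end up in three row groups (2 + 2 + 1)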
df = pd.DataFrame(
data={"P": range(10), "L": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"]}
)
store_dataframes_as_dataset(
store=store_factory,
dataset_uuid="dataset_uuid",
partition_on=["L"],
dfs=[df],
df_serializer=ps,
)
predicates = [[("L", "==", "a")]]
df_stats = collect_dataset_metadata(
store=store_factory,
dataset_uuid="dataset_uuid",
table_name="table",
predicates=predicates,
frac=1,
).compute()
for part_label in df_stats["partition_label"]:
assert "L=a" in part_label
df_stats.sort_values(by=["partition_label", "row_group_id"], inplace=True)
actual = df_stats.drop(
columns=[
"partition_label",
"row_group_compressed_size",
"row_group_uncompressed_size",
"serialized_size",
],
axis=1,
)
expected = pd.DataFrame(
data={
"row_group_id": [0, 1, 2],
"number_rows_total": [5, 5, 5],
"number_row_groups": [3, 3, 3],
"number_rows_per_row_group": [2, 2, 1],
},
index=[0, 1, 2],
)
pd.testing.assert_frame_equal(actual, expected)
def test_collect_dataset_metadata_frac_smoke(store_session_factory, dataset):
df_stats = collect_dataset_metadata(
store=store_session_factory,
dataset_uuid="dataset_uuid",
table_name="table",
frac=0.8,
).compute()
columns = {
"partition_label",
"row_group_id",
"row_group_compressed_size",
"row_group_uncompressed_size",
"number_rows_total",
"number_row_groups",
"serialized_size",
"number_rows_per_row_group",
}
assert set(df_stats.columns) == columns
def test_collect_dataset_metadata_empty_dataset_mp(store_factory):
mp = MetaPartition(label="cluster_1")
store_dataset_from_partitions(
partition_list=[mp], store=store_factory, dataset_uuid="dataset_uuid"
)
df_stats = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table"
).compute()
expected = pd.DataFrame(columns=_METADATA_SCHEMA.keys())
expected = expected.astype(_METADATA_SCHEMA)
pd.testing.assert_frame_equal(expected, df_stats, check_index_type=False)
def test_collect_dataset_metadata_empty_dataset(store_factory):
df = pd.DataFrame(columns=["A", "b"], index=pd.RangeIndex(start=0, stop=0))
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", dfs=[df], partition_on=["A"]
)
df_stats = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table",
).compute()
expected = pd.DataFrame(columns=_METADATA_SCHEMA.keys())
expected = expected.astype(_METADATA_SCHEMA)
pd.testing.assert_frame_equal(expected, df_stats)
def test_collect_dataset_metadata_concat(store_factory):
"""Smoke-test concatenation of empty and non-empty dataset metadata collections."""
df = pd.DataFrame(data={"A": [1, 1, 1, 1], "b": [1, 1, 2, 2]})
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", dfs=[df], partition_on=["A"]
)
df_stats1 = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table",
).compute()
# Remove all partitions of the dataset
update_dataset_from_dataframes(
[], store=store_factory, dataset_uuid="dataset_uuid", delete_scope=[{"A": 1}]
)
df_stats2 = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table",
).compute()
pd.concat([df_stats1, df_stats2])
def test_collect_dataset_metadata_delete_dataset(store_factory):
df = pd.DataFrame(data={"A": [1, 1, 1, 1], "b": [1, 1, 2, 2]})
store_dataframes_as_dataset(
store=store_factory, dataset_uuid="dataset_uuid", dfs=[df], partition_on=["A"]
)
# Remove all partitions of the dataset
update_dataset_from_dataframes(
[], store=store_factory, dataset_uuid="dataset_uuid", delete_scope=[{"A": 1}]
)
df_stats = collect_dataset_metadata(
store=store_factory, dataset_uuid="dataset_uuid", table_name="table",
).compute()
expected = | pd.DataFrame(columns=_METADATA_SCHEMA) | pandas.DataFrame |
from collections import abc
import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameToRecords:
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"),
)
expected = df.index.values[0]
result = df.to_records()["index"][0]
assert expected == result
def test_to_records_dt64tz_column(self):
# GH#32535 dont less tz in to_records
df = DataFrame({"A": date_range("2012-01-01", "2012-01-02", tz="US/Eastern")})
result = df.to_records()
assert result.dtype["A"] == object
val = result[0][1]
assert isinstance(val, Timestamp)
assert val == df.loc[0, "A"]
def test_to_records_with_multindex(self):
# GH#3189
index = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
data = np.zeros((8, 4))
df = DataFrame(data, index=index)
r = df.to_records(index=True)["level_0"]
assert "bar" in r
assert "one" not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
abc.Mapping.register(email.message.Message)
headers = Parser().parsestr(
"From: <<EMAIL>>\n"
"To: <<EMAIL>>\n"
"Subject: Test message\n"
"\n"
"Body would go here\n"
)
frame = DataFrame.from_records([headers])
all(x in frame for x in ["Type", "Subject", "From"])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = "X"
rs = df.to_records()
assert "X" in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert "index" in rs.dtype.fields
df.index = MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
df.index.names = ["A", None]
rs = df.to_records()
assert "level_0" in rs.dtype.fields
def test_to_records_with_unicode_index(self):
# GH#13172
# unicode_literals conflict with to_records
result = DataFrame([{"a": "x", "b": "y"}]).set_index("a").to_records()
expected = np.rec.array([("x", "y")], dtype=[("a", "O"), ("b", "O")])
tm.assert_almost_equal(result, expected)
def test_to_records_with_unicode_column_names(self):
# xref issue: https://github.com/numpy/numpy/issues/2407
# Issue GH#11879. to_records used to raise an exception when used
# with column names containing non-ascii characters in Python 2
result = DataFrame(data={"accented_name_é": [1.0]}).to_records()
# Note that numpy allows for unicode field names but dtypes need
# to be specified using dictionary instead of list of tuples.
expected = np.rec.array(
[(0, 1.0)],
dtype={"names": ["index", "accented_name_é"], "formats": ["=i8", "=f8"]},
)
tm.assert_almost_equal(result, expected)
def test_to_records_with_categorical(self):
# GH#8626
# dict creation
df = DataFrame({"A": list("abc")}, dtype="category")
expected = Series(list("abc"), dtype="category", name="A")
tm.assert_series_equal(df["A"], expected)
# list-like creation
df = DataFrame(list("abc"), dtype="category")
expected = Series(list("abc"), dtype="category", name=0)
tm.assert_series_equal(df[0], expected)
# to record array
# this coerces
result = df.to_records()
expected = np.rec.array(
[(0, "a"), (1, "b"), (2, "c")], dtype=[("index", "=i8"), ("0", "O")]
)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# No dtypes --> default to array dtypes.
(
dict(),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Should have no effect in this case.
(
dict(index=True),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Column dtype applied across the board. Index unaffected.
(
dict(column_dtypes="<U4"),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U4"), ("B", "<U4"), ("C", "<U4")],
),
),
# Index dtype applied across the board. Columns unaffected.
(
dict(index_dtypes="<U1"),
np.rec.array(
[("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")],
dtype=[("index", "<U1"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Pass in a type instance.
(
dict(column_dtypes=str),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")],
),
),
# Pass in a dtype instance.
(
dict(column_dtypes=np.dtype("unicode")),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")],
),
),
# Pass in a dictionary (name-only).
(
dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "<U2")],
),
),
# Pass in a dictionary (indices-only).
(
dict(index_dtypes={0: "int16"}),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Ignore index mappings if index is not True.
(
dict(index=False, index_dtypes="<U2"),
np.rec.array(
[(1, 0.2, "a"), (2, 1.5, "bc")],
dtype=[("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Non-existent names / indices in mapping should not error.
(
dict(index_dtypes={0: "int16", "not-there": "float32"}),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Names / indices not in mapping default to array dtype.
(
dict(column_dtypes={"A": np.int8, "B": np.float32}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
# Names / indices not in dtype mapping default to array dtype.
(
dict(column_dtypes={"A": np.dtype("int8"), "B": np.dtype("float32")}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
# Mixture of everything.
(
dict(column_dtypes={"A": np.int8, "B": np.float32}, index_dtypes="<U2"),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<U2"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
            # Invalid dtype values.
(
dict(index=False, column_dtypes=list()),
(ValueError, "Invalid dtype \\[\\] specified for column A"),
),
(
dict(index=False, column_dtypes={"A": "int32", "B": 5}),
(ValueError, "Invalid dtype 5 specified for column B"),
),
# Numpy can't handle EA types, so check error is raised
(
dict(
index=False,
column_dtypes={"A": "int32", "B": CategoricalDtype(["a", "b"])},
),
(ValueError, "Invalid dtype category specified for column B"),
),
# Check that bad types raise
(
dict(index=False, column_dtypes={"A": "int32", "B": "foo"}),
(TypeError, "data type [\"']foo[\"'] not understood"),
),
],
)
def test_to_records_dtype(self, kwargs, expected):
# see GH#18146
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
if not isinstance(expected, np.recarray):
with pytest.raises(expected[0], match=expected[1]):
df.to_records(**kwargs)
else:
result = df.to_records(**kwargs)
| tm.assert_almost_equal(result, expected) | pandas._testing.assert_almost_equal |
import os
import sys
import inspect
from copy import deepcopy
import numpy as np
import pandas as pd
from ucimlr.helpers import (download_file, download_unzip, one_hot_encode_df_, xy_split,
normalize_df_, split_normalize_sequence, split_df, get_split, split_df_on_column)
from ucimlr.dataset import Dataset
from ucimlr.constants import TRAIN
from ucimlr.constants import REGRESSION
def all_datasets():
"""
Returns a list of all RegressionDataset classes.
"""
return [cls for _, cls in inspect.getmembers(sys.modules[__name__])
if inspect.isclass(cls)
and issubclass(cls, RegressionDataset)
and cls != RegressionDataset]
class RegressionDataset(Dataset):
type_ = REGRESSION # Is this necessary?
@property
def num_targets(self):
return self.y.shape[1]
class Abalone(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Abalone).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path, header=None)
y_columns = df.columns[-1:]
one_hot_encode_df_(df)
df_test, df_train, df_valid = split_df(df, [0.2, 0.8 - 0.8 * validation_size, 0.8 * validation_size])
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
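# Hedged usage sketch (not part of the original module): how a RegressionDataset subclass is
# typically consumed. Only the TRAIN constant is imported above; the other splits are selected by
# passing the split strings documented in each class docstring. The root path is a placeholder.
def _example_abalone_usage(root="./uci_data"):
    """Illustrative only: load the Abalone training split and report its feature/target shapes."""
    ds = Abalone(root, split=TRAIN, validation_size=0.2)
    return ds.x.shape, ds.y.shape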
class AirFoil(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'airfoil_self_noise.dat'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, sep='\t', names=["Frequency(Hz)", "Angle of attacks(Deg)", "Chord length(m)", "Free-stream velocity(m/s)", "Suction side displacement thickness(m)", "Scaled sound pressure level(Db)"])
y_columns = ['Scaled sound pressure level(Db)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class AirQuality(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'AirQualityUCI.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00360/AirQualityUCI.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';', parse_dates=[0, 1])
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
df.Date = (df.Date - df.Date.min()).astype('timedelta64[D]') # Days as int
df.Time = df.Time.apply(lambda x: int(x.split('.')[0])) # Hours as int
df['C6H6(GT)'] = df['C6H6(GT)'].apply(lambda x: float(x.replace(',', '.'))) # Target as float
# Some floats are given with ',' instead of '.'
        df = df.applymap(lambda x: float(x.replace(',', '.')) if type(x) is str else x)  # all comma-decimal strings to float
df = df[df['C6H6(GT)'] != -200] # Drop all rows with missing target values
df.loc[df['CO(GT)'] == -200, 'CO(GT)'] = -10 # -200 means missing value, shifting this to be closer to
# the other values for this column
y_columns = ['C6H6(GT)']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Appliances_energy_prediction(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Appliances+energy+prediction).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'energydata_complete.csv'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00374/energydata_complete.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, parse_dates=[0, 1])
df.date = (df.date - df.date.min()).astype('timedelta64[D]')
y_columns = ['Appliances']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class AutoMPG(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'auto-mpg.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep='\s+', names =["mpg", "cylinders", "displacements", "horsepower", "weight", "acceleration", "model year", "origin", "car name"])
y_columns = ['mpg']
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
        self.problem_type = REGRESSION
class Automobile(RegressionDataset):
"""
Link to the dataset [description](https://archive.ics.uci.edu/ml/datasets/Automobile).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'imports-85.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
        df = pd.read_csv(file_path, names=["symboling", "normalized-losses", "make", "fuel-type", "aspiration", "num-of-doors", "body-style", "drive-wheels", "engine-location", "wheel-base", "length", "width", "height", "curb-weight", "engine-type", "num-of-cylinders", "engine-size", "fuel-system", "bore", "stroke", "compression-ratio", "horsepower", "peak-rpm", "city-mpg", "highway-mpg", "price"])
        y_columns = ['price']  # assumed target: the original left this list empty
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class BeijingAirQuality(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00501/PRSA2017_Data_20130301-20170228.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if 'PRSA_Data' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class BeijingPM(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Beijing+PM2.5+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'PRSA_data_2010.1.1-2014.12.31.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00381/PRSA_data_2010.1.1-2014.12.31.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
y_columns=['pm2.5']
        self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
self.problem_type = REGRESSION
class BiasCorrection(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bias+correction+of+numerical+prediction+model+temperature+forecast).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Bias_correction_ucl.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00514/Bias_correction_ucl.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, index_col = 'Date', parse_dates= True)
class BikeSharing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00275/Bike-Sharing-Dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path))
class CarbonNanotubes(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Carbon+Nanotubes).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'carbon_nanotubes.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00448/carbon_nanotubes.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ChallengerShuttleORing(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'o-ring-erosion-only.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class BlogFeedback(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/BlogFeedback).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
file_name = 'blogData_train.csv'
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00304/BlogFeedback.zip'
download_unzip(url, dataset_path)
# Iterate all test csv and concatenate to one DataFrame
test_dfs = []
for fn in os.listdir(dataset_path):
if 'blogData_test' not in fn:
continue
file_path = os.path.join(dataset_path, fn)
test_dfs.append(pd.read_csv(file_path, header=None))
df_test = pd.concat(test_dfs)
file_path = os.path.join(dataset_path, file_name)
df_train_valid = pd.read_csv(file_path, header=None)
y_columns = [280]
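        # The target column is log-transformed below; the small 0.01 offset avoids log(0)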
df_train_valid[y_columns[0]] = np.log(df_train_valid[y_columns[0]] + 0.01)
df_test[y_columns[0]] = np.log(df_test[y_columns[0]] + 0.01)
page_columns = list(range(50))
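        # Derive a synthetic page id: rows sharing all 50 page-level feature columns are assumed to belong to the same blog page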
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
class CommunitiesCrime(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'communities.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/communities/communities.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,header=None)
class ConcreteSlumpTest(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Slump+Test).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'slump_test.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class PropulsionPlants (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00316/UCI CBM Dataset.zip'
download_unzip(url, dataset_path)
filename = 'data.txt'
file_path = os.path.join(dataset_path, filename)
        # data.txt is assumed to be whitespace-separated with no header row (UCI CBM dataset)
        df = pd.read_csv(file_path, sep='\s+', header=None)
class ConcreteCompressiveStrength (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Concrete+Compressive+Strength).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Concrete_Data.xls'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_excel(file_path)
class ComputerHardware (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Computer+Hardware).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'machine.data'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/cpu-performance/machine.data'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, names=["vendor name", "Model Name", "MYCT", "MMIN", "MMAX", "CACH", "CHMIN", "CHMAX", "PRP", "ERP"])
class CommunitiesCrimeUnnormalized (RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime+Unnormalized).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'CommViolPredUnnormalizedData.txt'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00211/CommViolPredUnnormalizedData.txt'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, keep_default_na=False, header=None)
class CTSlices(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00206/slice_localization_data.zip'
download_unzip(url, dataset_path)
file_name = 'slice_localization_data.csv'
file_path = os.path.join(dataset_path, file_name)
df = pd.read_csv(file_path)
# No patient should be in both train and test set
df_train_valid = deepcopy(df.loc[df.patientId < 80, :]) # Pandas complains if it is a view
df_test = deepcopy(df.loc[df.patientId >= 80, :]) # - " -
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'patientId')
y_columns = ['reference']
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
df_res = df_res.drop(columns='patientId')
self.x, self.y = xy_split(df_res, y_columns)
class ForecastingOrders(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Daily+Demand+Forecasting+Orders).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/Daily_Demand_Forecasting_Orders.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep=';')
class ForecastingStoreData(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Demand+Forecasting+for+a+store).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Daily_Demand_Forecasting_Orders.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00409/'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path,sep='\s+')
class FacebookComments(RegressionDataset):
"""
Predict the number of likes on posts from a collection of Facebook pages.
Every page has multiple posts, making the number of pages less than the samples
in the dataset (each sample is one post).
# Note
The provided test split has a relatively large discrepancy in terms
of distributions of the features and targets. Training and validation splits are
also made to ensure that the same page is not in both splits. This makes the distributions
of features in training and validation splits vary to a relatively large extent, possible
because the number of pages are not that many, while the features are many.
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Facebook+Comment+Volume+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00363/Dataset.zip'
download_unzip(url, dataset_path)
dataset_path = os.path.join(dataset_path, 'Dataset')
# The 5th variant has the most data
train_path = os.path.join(dataset_path, 'Training', 'Features_Variant_5.csv')
test_path = os.path.join(dataset_path, 'Testing', 'Features_TestSet.csv')
df_train_valid = pd.read_csv(train_path, header=None)
df_test = pd.read_csv(test_path, header=None)
y_columns = df_train_valid.columns[-1:]
# Page ID is not included, but can be derived. Page IDs can not be
# in both training and validation sets
page_columns = list(range(29))
for i, (_, df_group) in enumerate(df_train_valid.groupby(page_columns)):
df_train_valid.loc[df_group.index, 'page_id'] = i
df_train, df_valid = split_df_on_column(df_train_valid, [1 - validation_size, validation_size], 'page_id')
df_train.drop(columns='page_id', inplace=True)
df_valid.drop(columns='page_id', inplace=True)
normalize_df_(df_train, other_dfs=[df_valid, df_test])
df_res = get_split(df_train, df_valid, df_test, split)
self.x, self.y = xy_split(df_res, y_columns)
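# The page-aware split above relies on split_df_on_column from ucimlr.helpers. As an illustration
# only (an assumption about the intended behaviour, not the helper's actual code), a group-aware
# split that keeps every page in exactly one partition can be sketched with plain numpy/pandas:
def _group_aware_split_sketch(df, group_column, train_fraction=0.8, seed=0):
    """Illustrative only: split rows so that all rows of a group stay in a single partition."""
    rng = np.random.RandomState(seed)
    groups = df[group_column].unique()
    rng.shuffle(groups)
    n_train = int(round(train_fraction * len(groups)))
    train_groups = set(groups[:n_train])
    mask = df[group_column].isin(train_groups)
    return df[mask], df[~mask]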
class Facebookmetrics (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Condition+Based+Maintenance+of+Naval+Propulsion+Plants).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00368/Facebook_metrics.zip'
download_unzip(url, dataset_path)
filename = 'dataset_Facebook.csv'
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, sep=';')
class ForestFires(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Forest+Fires).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'forestfires.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/forest-fires/forestfires.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class GNFUV(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00452/GNFUV USV Dataset.zip'
download_unzip(url, dataset_path)
dfs = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
dfs.append(pd.read_csv(file_path, header=None))
class GNFUV_2(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/GNFUV+Unmanned+Surface+Vehicles+Sensor+Data+Set+2).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00466/CNFUV_Datasets.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None))
class Greenhouse_Gas_Observing_Network (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Greenhouse+Gas+Observing+Network).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00328/ghg_data.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, header=None, sep='\s+'))
class Hungarian_Chickenpox_Cases (RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Hungarian+Chickenpox+Cases).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00580/hungary_chickenpox.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, index_col='Date', parse_dates=True))
class IIWA14_R820_Gazebo_Dataset_10Trajectories(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/IIWA14-R820-Gazebo-Dataset-10Trajectories).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00574/IIWA14-R820-Gazebo-Dataset-10Trayectorias.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path, header=None)
class Metro_Interstate_Traffic_Volume(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Metro+Interstate+Traffic+Volume).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Metro_Interstate_Traffic_Volume.csv.gz'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00492/Metro_Interstate_Traffic_Volume.csv.gz'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Economy(RegressionDataset):
"""
    Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_Facebook_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'Facebook_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/Facebook_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_GooglePlus_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'GooglePlus_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/GooglePlus_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Economy(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Economy.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Economy.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Microsoft(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Microsoft.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Microsoft.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Obama(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Obama.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Obama.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_LinkedIn_Palestine(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'LinkedIn_Palestine.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/LinkedIn_Palestine.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class News_Popularity_News_Final(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/News+Popularity+in+Multiple+Social+Media+Platforms).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'News_Final.csv'
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00432/Data/News_Final.csv'
download_file(url, dataset_path, filename)
file_path = os.path.join(dataset_path, filename)
df = pd.read_csv(file_path)
class Online_Video_Characteristics_and_Transcoding_Time(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/Online+Video+Characteristics+and+Transcoding+Time+Dataset).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00335/online_video_dataset.zip'
download_unzip(url, dataset_path)
df = []
for fn in os.listdir(dataset_path):
if fn == 'README.txt':
continue
file_path = os.path.join(dataset_path, fn)
df.append(pd.read_csv(file_path, sep='\t'))
class OnlineNews(RegressionDataset):
"""
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00332/OnlineNewsPopularity.zip'
download_unzip(url, dataset_path)
file_path = os.path.join(dataset_path, 'OnlineNewsPopularity', 'OnlineNewsPopularity.csv')
df = pd.read_csv(file_path)
df.drop(columns=['url', ' timedelta'], inplace=True)
y_columns = [' shares']
df[y_columns[0]] = np.log(df[y_columns[0]])
self.x, self.y = split_normalize_sequence(df, y_columns, validation_size, split, self.type_)
class Parkinson(RegressionDataset):
"""
Link to the dataset [description](http://archive.ics.uci.edu/ml/datasets/parkinsons).
# Parameters
root (str): Local path for storing/reading dataset files.
split (str): One of {'train', 'validation', 'test'}
validation_size (float): How large fraction in (0, 1) of the training partition to use for validation.
"""
def __init__(self, root, split=TRAIN, validation_size=0.2):
dataset_path = os.path.join(root, self.name)
filename = 'data.csv'
file_path: str = os.path.join(dataset_path, filename)
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/' \
'parkinsons/telemonitoring/parkinsons_updrs.data'
download_file(url, dataset_path, filename)
df = pd.read_csv(file_path)
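# Illustrative usage sketch (not from the source): the dataset classes above share one
# access pattern -- construct with a root directory and a split, then read the
# normalized arrays from the .x and .y attributes (as assigned in OnlineNews above).
# TRAIN is the same constant used as the default split in the constructors.
def _example_uci_dataset_usage(root):
    train_set = OnlineNews(root, split=TRAIN, validation_size=0.2)
    return train_set.x, train_set.y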
import math
import pandas as pd
import numpy as np
RATINGS_CSV_FILE = 'ml1m_ratings.csv'
MODEL_WEIGHTS_FILE = 'ml1m_weights.h5'
LIBSVM_FILE = 'ml1m'
K_FACTORS = 60
RNG_SEED = 1446557
ratings = pd.read_csv(RATINGS_CSV_FILE,
sep='\t',
encoding='latin-1',
usecols=['userid', 'movieid', 'rating', 'timestamp'])
#ratings = ratings.sort_values(by = ['userid', 'timestamp'], ascending=True)
user_count = ratings.groupby(['userid']).count()
max_userid = ratings['userid'].drop_duplicates().max()
max_movieid = ratings['movieid'].drop_duplicates().max()
train_df = []
vali_df = []
test_df = []
for i in range(max_userid):
uid = i+1
df_slice = ratings.loc[ratings['userid'] == uid].sort_values(by=['timestamp'], ascending=True)
cnt = df_slice.count()['userid']
if cnt < 40:
train_df.append(df_slice)
else:
slice1, slice2, slice3, slice4 = np.array_split(df_slice, 4)
train_df.extend([slice1, slice2])
vali_df.append(slice3)
test_df.append(slice4)
train_part = pd.concat(train_df)
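# Quick sanity check (illustrative, not from the source) of the per-user split above:
# a user's chronologically sorted ratings are cut into four near-equal parts with
# np.array_split -- the first two parts go to train_df, the third to vali_df and the
# fourth to test_df.
def _check_quarter_split(n_ratings=41):
    parts = np.array_split(np.arange(n_ratings), 4)
    assert sum(len(p) for p in parts) == n_ratings
    return [len(p) for p in parts]  # e.g. [11, 10, 10, 10] for 41 ratings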
# -*- coding: utf-8 -*-
from __future__ import print_function
from numpy import nan
import numpy as np
from pandas import compat
from pandas import (DataFrame, Series, MultiIndex, Timestamp,
date_range)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameConvertTo(tm.TestCase, TestData):
_multiprocess_can_split_ = True
def test_to_dict(self):
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
recons_data = DataFrame(test_data).to_dict()
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("l")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][int(k2) - 1])
recons_data = DataFrame(test_data).to_dict("s")
for k, v in compat.iteritems(test_data):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k][k2])
recons_data = DataFrame(test_data).to_dict("sp")
expected_split = {'columns': ['A', 'B'], 'index': ['1', '2', '3'],
'data': [[1.0, '1'], [2.0, '2'], [nan, '3']]}
tm.assert_dict_equal(recons_data, expected_split)
recons_data = DataFrame(test_data).to_dict("r")
expected_records = [{'A': 1.0, 'B': '1'},
{'A': 2.0, 'B': '2'},
{'A': nan, 'B': '3'}]
tm.assertIsInstance(recons_data, list)
self.assertEqual(len(recons_data), 3)
for l, r in zip(recons_data, expected_records):
tm.assert_dict_equal(l, r)
# GH10844
recons_data = DataFrame(test_data).to_dict("i")
for k, v in compat.iteritems(test_data):
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from pathlib import Path
from typing import Sequence, Union, List, Dict, Optional
import pandas as pd
from qf_lib.common.enums.frequency import Frequency
from qf_lib.common.enums.price_field import PriceField
from qf_lib.common.tickers.tickers import Ticker
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
from qf_lib.common.utils.miscellaneous.to_list_conversion import convert_to_list
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.futures.future_tickers.future_ticker import FutureTicker
from qf_lib.data_providers.helpers import normalize_data_array, tickers_dict_to_data_array
from qf_lib.data_providers.preset_data_provider import PresetDataProvider
class CSVDataProvider(PresetDataProvider):
"""
Generic Data Provider that loads csv files. All the files should have a certain naming convention (see Notes).
Additionally, the data provider requires a mapping between the header names in the file and the corresponding
price fields, in the form of a dictionary where the key is a column name from the file and the value is
the corresponding Price field. Please note that this is required to use the get_price method. For example:
Time,Open price,Close Price, ...
...
should be mapped as follows: {'Open Price': PriceField.Open, 'Close Price': PriceField.Close, ...} in order to
have a correctly working get_price method, which requires PriceFields as the fields.
Parameters
-----------
path: str
it should be either the path to the directory containing the CSV files, or the path to the specific file when ticker_col
is used and only one file should be loaded
tickers: Ticker, Sequence[Ticker]
one or a list of tickers, used further to download the prices data
index_col: str
Label of the dates / timestamps column, which will be later on used to index the data
field_to_price_field_dict: Optional[Dict[str, PriceField]]
mapping of headers to fields. The key is a column name, and the value is a corresponding field. It is required
if we want to map str fields to PriceFields and use the get_price method. Please note that mapped fields will be
still available in get_history method using initial str values. All str fields specified as the keys
should also be specified in the fields
fields: Optional[str, List[str]]
fields that should be downloaded. By default all fields (columns) are downloaded. Based on field_to_price_field_dict
additional columns will be created and available in the get_price method thanks to PriceFields mapping.
start_date: Optional[datetime]
first date to be downloaded
end_date: Optional[datetime]
last date to be downloaded
frequency: Optional[Frequency]
frequency of the data. The parameter is optional, and by default equals to daily Frequency.
dateformat: Optional[str]
the strftime to parse time, e.g. "%d/%m/%Y". Parameter is Optional and if not provided, the data provider will
try to infer the dates format from the data. By default None.
ticker_col: Optional[str]
column name with the tickers
Notes
-----
- FutureTickers are not supported by this data provider.
- By default, data for each ticker should be in a separate file named after this ticker's string representation
(in most cases it is simply its name; to check the string representation of a given ticker, use the
Ticker.as_string() function). However, you can also load one file containing all data with specified tickers in
one column, row by row, as in the demo example files daily_data.csv or intraday_data.csv.
In order to do so you need to specify the name of the ticker column in ticker_col and specify the path to the file.
- Please note that when using ticker_col it is required to provide the path to a specific file (loading is not
based on ticker names as it is in the default approach)
- By providing the field_to_price_field_dict mapping you are able to use the get_price method, which allows you to
aggregate intraday data (currently, get_history does not allow using intraday data aggregation)
"""
def __init__(self, path: str, tickers: Union[Ticker, Sequence[Ticker]], index_col: str,
field_to_price_field_dict: Optional[Dict[str, PriceField]] = None, fields: Optional[Union[str, List[str]]] = None,
start_date: Optional[datetime] = None, end_date: Optional[datetime] = None,
frequency: Optional[Frequency] = Frequency.DAILY, dateformat: Optional[str] = None, ticker_col: Optional[str] = None):
self.logger = qf_logger.getChild(self.__class__.__name__)
if fields:
fields, _ = convert_to_list(fields, str)
# Convert to list and remove duplicates
tickers, _ = convert_to_list(tickers, Ticker)
tickers = list(dict.fromkeys(tickers))
assert len([t for t in tickers if isinstance(t, FutureTicker)]) == 0, "FutureTickers are not supported by " \
"this data provider"
data_array, start_date, end_date, available_fields = self._get_data(path, tickers, fields, start_date, end_date, frequency, field_to_price_field_dict,
index_col, dateformat, ticker_col)
normalized_data_array = normalize_data_array(data_array, tickers, available_fields, False, False, False)
super().__init__(data=normalized_data_array,
start_date=start_date,
end_date=end_date,
frequency=frequency)
def _get_data(self, path: str, tickers: Sequence[Ticker], fields: Optional[Sequence[str]], start_date: datetime,
end_date: datetime, frequency: Frequency, field_to_price_field_dict: Optional[Dict[str, PriceField]],
index_col: str, dateformat: str, ticker_col):
tickers_str_mapping = {ticker.as_string(): ticker for ticker in tickers}
tickers_prices_dict = {}
available_fields = set()
def _process_df(df, ticker_str):
df.index = pd.to_datetime(df[index_col], format=dateformat)
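# --- Illustrative usage sketch (not part of the original module) ---------------------
# Shows how the header-to-PriceField mapping described in the class docstring would be
# wired up. The file path, column names and Bloomberg-style ticker are assumptions made
# for this example only.
def _example_csv_provider(path="prices/AAPL.csv"):
    from qf_lib.common.tickers.tickers import BloombergTicker
    return CSVDataProvider(
        path=path,
        tickers=BloombergTicker("AAPL US Equity"),
        index_col="Time",
        field_to_price_field_dict={"Open price": PriceField.Open,
                                   "Close Price": PriceField.Close},
        fields=["Open price", "Close Price"],
    )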
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing, get_col_mapping_ce
class TestInverseTransformCaterogyEncoder(unittest.TestCase):
def test_inverse_transform_1(self):
"""
Test no preprocessing
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR']})
original = inverse_transform(train)
pd.testing.assert_frame_equal(original, train)
def test_inverse_transform_2(self):
"""
Test multiple preprocessing
"""
train = pd.DataFrame({'Onehot1': ['A', 'B', 'A', 'B'], 'Onehot2': ['C', 'D', 'C', 'D'],
'Binary1': ['E', 'F', 'E', 'F'], 'Binary2': ['G', 'H', 'G', 'H'],
'Ordinal1': ['I', 'J', 'I', 'J'], 'Ordinal2': ['K', 'L', 'K', 'L'],
'BaseN1': ['M', 'N', 'M', 'N'], 'BaseN2': ['O', 'P', 'O', 'P'],
'Target1': ['Q', 'R', 'Q', 'R'], 'Target2': ['S', 'T', 'S', 'T'],
'other': ['other', np.nan, 'other', 'other']})
test = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'ZZ'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'ZZ'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'ZZ'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', 'ZZ'],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'ZZ'],
'other': ['other', '123', np.nan]})
expected = pd.DataFrame({'Onehot1': ['A', 'B', 'A'], 'Onehot2': ['C', 'D', 'missing'],
'Binary1': ['E', 'F', 'F'], 'Binary2': ['G', 'H', 'missing'],
'Ordinal1': ['I', 'J', 'J'], 'Ordinal2': ['K', 'L', 'missing'],
'BaseN1': ['M', 'N', 'N'], 'BaseN2': ['O', 'P', np.nan],
'Target1': ['Q', 'R', 'R'], 'Target2': ['S', 'T', 'NaN'],
'other': ['other', '123', np.nan]})
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
enc_onehot = ce.OneHotEncoder(cols=['Onehot1', 'Onehot2']).fit(train)
train_onehot = enc_onehot.transform(train)
enc_binary = ce.BinaryEncoder(cols=['Binary1', 'Binary2']).fit(train_onehot)
train_binary = enc_binary.transform(train_onehot)
enc_ordinal = ce.OrdinalEncoder(cols=['Ordinal1', 'Ordinal2']).fit(train_binary)
train_ordinal = enc_ordinal.transform(train_binary)
enc_basen = ce.BaseNEncoder(cols=['BaseN1', 'BaseN2']).fit(train_ordinal)
train_basen = enc_basen.transform(train_ordinal)
enc_target = ce.TargetEncoder(cols=['Target1', 'Target2']).fit(train_basen, y)
input_dict1 = dict()
input_dict1['col'] = 'Onehot2'
input_dict1['mapping'] = pd.Series(data=['C', 'D', np.nan], index=['C', 'D', 'missing'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'Binary2'
input_dict2['mapping'] = pd.Series(data=['G', 'H', np.nan], index=['G', 'H', 'missing'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'Ordinal2'
input_dict3['mapping'] = pd.Series(data=['K', 'L', np.nan], index=['K', 'L', 'missing'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
result1 = enc_onehot.transform(test)
result2 = enc_binary.transform(result1)
result3 = enc_ordinal.transform(result2)
result4 = enc_basen.transform(result3)
result5 = enc_target.transform(result4)
original = inverse_transform(result5, [enc_onehot, enc_binary, enc_ordinal, enc_basen, enc_target, input_dict1,
list_dict])
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_3(self):
"""
Test target encoding
"""
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago', 'chicago'],
'state': ['US', 'FR', 'FR', 'US', 'US'],
'other': ['A', 'A', np.nan, 'B', 'B']})
test = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
expected = pd.DataFrame({'city': ['chicago', 'paris', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', np.nan, np.nan]})
y = pd.DataFrame(data=[0, 1, 1, 0, 1], columns=['y'])
enc = ce.TargetEncoder(cols=['city', 'state']).fit(train, y)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_4(self):
"""
Test ordinal encoding
"""
train = pd.DataFrame({'city': ['chicago', 'st louis']})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
expected = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(expected, original)
def test_inverse_transform_5(self):
"""
Test inverse_transform with NaN in train and handle_missing='value'; expect the NaN to be returned (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_6(self):
"""
Test inverse_transform with NaN in train and handle_missing='return_nan'; expect the NaN to be returned (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='value')
result = enc.fit_transform(train)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_7(self):
"""
Test inverse_transform when both handle_missing and handle_unknown are 'return_nan'; unknown values are inverted to NaN (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='return_nan', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_8(self):
"""
Test inverse_transform with handle_missing='value' and handle_unknown='return_nan'; expect the unknown value to be inverted to NaN (OrdinalEncoder)
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
test = pd.DataFrame({'city': ['chicago', 'los angeles']})
enc = ce.OrdinalEncoder(handle_missing='value', handle_unknown='return_nan')
enc.fit(train)
result = enc.transform(test)
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(train, original)
def test_inverse_transform_9(self):
"""
Test inverse_transform with handle_missing='value' and handle_unknown='return_nan'; expect the best inverse ordinal mapping
"""
train = pd.DataFrame({'city': ['chicago', np.nan]})
"""
TGBase features generation for dynamic node classification
"""
import argparse
import sys
import numpy as np
import random
from utils import *
import pandas as pd
from tqdm import tqdm
rnd_seed = 2021
random.seed(rnd_seed)
def init_node_memory(node_list, no_edge_feats):
"""
initialize the nodes' feature state list for the given node_list
"""
init_node_states = {}
for node in node_list:
init_node_states[node] = {}
init_node_states[node]['no_event'] = 0 # number of events involving this node so far
init_node_states[node]['time_avg'] = 0
init_node_states[node]['time_min'] = float('inf')
init_node_states[node]['time_max'] = 0
init_node_states[node]['time_sum'] = 0
init_node_states[node]['time_last'] = 0 # timestamp of the previous seen event
init_node_states[node]['time_std'] = 0
for feat_idx in range(no_edge_feats):
init_node_states[node]['feat_' + str(feat_idx) + '_avg'] = 0
init_node_states[node]['feat_' + str(feat_idx) + '_min'] = float('inf')
init_node_states[node]['feat_' + str(feat_idx) + '_max'] = 0
init_node_states[node]['feat_' + str(feat_idx) + '_sum'] = 0
init_node_states[node]['feat_' + str(feat_idx) + '_std'] = 0
return init_node_states
def update_node_state(current_node_state, timestamp, edge_feature):
"""
update the state of one node based on the event characteristics
"""
time_interval = timestamp - current_node_state['time_last']
new_node_state = {'no_event': current_node_state['no_event'] + 1,
'time_avg': (current_node_state['time_avg'] * current_node_state['no_event'] + time_interval) / (
current_node_state['no_event'] + 1),
'time_min': min(current_node_state['time_min'], time_interval),
'time_max': max(current_node_state['time_max'], time_interval),
'time_sum': current_node_state['time_sum'] + time_interval,
'time_last': timestamp,
}
new_node_state['time_std'] = np.sqrt(
((time_interval - current_node_state['time_avg']) * (time_interval - new_node_state['time_avg']) +
(current_node_state['no_event']) * current_node_state['time_std'] ** 2) / new_node_state['no_event'])
for feat_idx in range(len(edge_feature)):
id = 'feat_' + str(feat_idx)
new_node_state[id + '_avg'] = (current_node_state[id + '_avg'] * current_node_state['no_event'] + edge_feature[
feat_idx]) / (current_node_state['no_event'] + 1)
new_node_state[id + '_min'] = min(current_node_state[id + '_min'], edge_feature[feat_idx])
new_node_state[id + '_max'] = max(current_node_state[id + '_max'], edge_feature[feat_idx])
new_node_state[id + '_sum'] = current_node_state[id + '_sum'] + edge_feature[feat_idx]
new_node_state[id + '_std'] = np.sqrt(((edge_feature[feat_idx] - new_node_state[id + '_avg']) * (
edge_feature[feat_idx] - current_node_state[id + '_avg']) +
current_node_state['no_event'] * current_node_state[
id + '_std'] ** 2) / (new_node_state['no_event']))
return new_node_state
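# Minimal sanity check (illustrative, not part of the original pipeline): the
# incremental std update in update_node_state above is Welford-style, so it should
# match a from-scratch population std (np.std with ddof=0) over the same values.
def _check_incremental_std(values=(1.0, 4.0, 2.0, 8.0)):
    mean, std, n = 0.0, 0.0, 0
    for x in values:
        n += 1
        new_mean = (mean * (n - 1) + x) / n
        std = np.sqrt(((x - mean) * (x - new_mean) + (n - 1) * std ** 2) / n)
        mean = new_mean
    assert np.isclose(std, np.std(np.asarray(values)))
    return std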
def gen_dynamic_emb_for_data_split(data, node_memory, edge_features):
"""
generate dynamic embeddings for a list of nodes
"""
emb_list = []
print("Info: Number of interactions:", len(data.sources))
for idx, source in tqdm(enumerate(data.sources)): # NB: Only "source" nodes
prev_source_state = node_memory[source] # current state features
current_source_state = update_node_state(prev_source_state, data.timestamps[idx],
edge_features[data.edge_idxs[idx]])
node_memory[source] = current_source_state
# if 'time_last' in node_states[source]: del node_states[source]['time_last']
current_source_state['node_id'] = source
current_source_state['timestamp'] = data.timestamps[idx]
current_source_state['label'] = data.labels[idx]
emb_list.append(current_source_state)
return node_memory, emb_list
def append_mask_to_emb(emb_list, mask_triplet):
for emb in emb_list:
emb['train_mask'] = mask_triplet[0]
emb['val_mask'] = mask_triplet[1]
emb['test_mask'] = mask_triplet[2]
return emb_list
def generate_TGBase_DynEmb(network, val_ratio, test_ratio, use_validation):
"""
generate TGBase dynamic embeddings for a dataset
"""
full_data, node_features, edge_features, \
train_data, val_data, test_data = get_data_node_classification(network, val_ratio, test_ratio, use_validation)
node_list = full_data.unique_nodes
print("Info: Total Number of nodes: {}".format(len(node_list)))
no_edge_feats = len(edge_features[0])
node_memory = init_node_memory(node_list, no_edge_feats)
node_emb_list = []
# train split
print("Info: Generating embeddings for training set...")
node_memory, emb_list_train = gen_dynamic_emb_for_data_split(train_data, node_memory, edge_features)
train_embs = append_mask_to_emb(emb_list_train, (1, 0, 0))
dyEmb_filename = f'./data/{network}/{network}_TGBase_emb_train.csv'
node_emb_df = pd.DataFrame(train_embs)
import json
import pickle
from datetime import date, datetime
from typing import Any
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
from pytest import raises
from slide.exceptions import (
SlideCastError,
SlideIndexIncompatibleError,
SlideInvalidOperation,
)
from slide.utils import SlideUtils
from slide_test.utils import assert_duck_eq, assert_pdf_eq, make_rand_df
from triad import Schema
from triad.utils.pyarrow import expression_to_schema, TRIAD_DEFAULT_TIMESTAMP
class SlideTestSuite(object):
"""Pandas-like utils test suite.
Any new :class:`~slide.utils.SlideUtils` should pass this test suite.
"""
class Tests(TestCase):
@classmethod
def setUpClass(cls):
# register_default_sql_engine(lambda engine: engine.sql_engine)
cls._utils = cls.make_utils(cls)
pass
def make_utils(self) -> SlideUtils:
raise NotImplementedError
@property
def utils(self) -> SlideUtils:
return self._utils # type: ignore
@classmethod
def tearDownClass(cls):
# cls._engine.stop()
pass
def to_pd(self, data: Any) -> pd.DataFrame:
raise NotImplementedError
def to_df(
self,
data: Any,
columns: Any = None,
coerce: bool = True,
):
raise NotImplementedError
def test_to_safe_pa_type(self):
assert pa.string() == self.utils.to_safe_pa_type(np.dtype(str))
assert pa.string() == self.utils.to_safe_pa_type(np.dtype(object))
assert TRIAD_DEFAULT_TIMESTAMP == self.utils.to_safe_pa_type(
np.dtype("datetime64[ns]")
)
if pd.__version__ >= "1.2":
assert pa.float64() == self.utils.to_safe_pa_type(pd.Float64Dtype())
assert pa.float32() == self.utils.to_safe_pa_type(pd.Float32Dtype())
assert pa.string() == self.utils.to_safe_pa_type(str)
assert pa.string() == self.utils.to_safe_pa_type("string")
assert pa.date32() == self.utils.to_safe_pa_type(date)
assert TRIAD_DEFAULT_TIMESTAMP == self.utils.to_safe_pa_type(datetime)
def test_is_series(self):
df = self.to_df([["a", 1]], "a:str,b:long")
assert self.utils.is_series(df["a"])
assert not self.utils.is_series(None)
assert not self.utils.is_series(1)
assert not self.utils.is_series("abc")
def test_to_series(self):
s1 = self.utils.to_series(pd.Series([0, 1], name="x"))
s2 = self.utils.to_series(pd.Series([2, 3], name="x"), "y")
s3 = self.utils.to_series([4, 5], "z")
s4 = self.utils.to_series(self.utils.to_series(s2), "w")
assert self.utils.is_series(s1)
assert self.utils.is_series(s2)
assert self.utils.is_series(s3)
assert self.utils.is_series(s4)
df = self.utils.cols_to_df([s1, s2, s3, s4])
assert_pdf_eq(
self.to_pd(df),
pd.DataFrame(dict(x=[0, 1], y=[2, 3], z=[4, 5], w=[2, 3])),
)
def test_to_constant_series(self):
s = self.utils.to_series(pd.Series([0, 1], name="x"))
s1 = self.utils.to_constant_series("a", s, name="y")
s2 = self.utils.to_constant_series(None, s, name="z", dtype="float64")
df = self.utils.cols_to_df([s, s1, s2])
assert_pdf_eq(
self.to_pd(df),
pd.DataFrame(dict(x=[0, 1], y=["a", "a"], z=[None, None])),
)
def test_get_col_pa_type(self):
df = self.to_df(
[["a", 1, 1.1, True, datetime.now()]],
"a:str,b:long,c:double,d:bool,e:datetime",
)
assert pa.types.is_string(self.utils.get_col_pa_type(df["a"]))
assert pa.types.is_string(self.utils.get_col_pa_type("a"))
assert pa.types.is_int64(self.utils.get_col_pa_type(df["b"]))
assert pa.types.is_integer(self.utils.get_col_pa_type(123))
assert pa.types.is_float64(self.utils.get_col_pa_type(df["c"]))
assert pa.types.is_floating(self.utils.get_col_pa_type(1.1))
assert pa.types.is_boolean(self.utils.get_col_pa_type(df["d"]))
assert pa.types.is_boolean(self.utils.get_col_pa_type(False))
assert pa.types.is_timestamp(self.utils.get_col_pa_type(df["e"]))
assert pa.types.is_timestamp(self.utils.get_col_pa_type(datetime.now()))
def test_unary_arithmetic_op(self):
pdf = pd.DataFrame([[2.0], [0.0], [None], [-3.0]], columns=["a"])
df = self.to_df(pdf)
df["a"] = self.utils.unary_arithmetic_op(df["a"], "+")
assert_pdf_eq(self.to_pd(df), pdf)
df["a"] = self.utils.unary_arithmetic_op(df["a"], "-")
pdf = pd.DataFrame([[-2.0], [0.0], [None], [3.0]], columns=["a"])
assert_pdf_eq(self.to_pd(df), pdf)
df["a"] = self.utils.unary_arithmetic_op(-10.1, "-")
pdf = pd.DataFrame([[10.1], [10.1], [10.1], [10.1]], columns=["a"])
assert_pdf_eq(self.to_pd(df), pdf)
raises(
NotImplementedError,
lambda: self.utils.unary_arithmetic_op(df["a"], "]"),
)
def test_binary_arithmetic_op(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.binary_arithmetic_op(df.a, df.b, op)
df["e"] = self.utils.binary_arithmetic_op(df.a, 1.0, op)
df["f"] = self.utils.binary_arithmetic_op(1.0, df.b, op)
df["g"] = self.utils.binary_arithmetic_op(1.0, 2.0, op)
df["h"] = self.utils.binary_arithmetic_op(1.0, df.c, op)
df["i"] = self.utils.binary_arithmetic_op(df.a, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghi")]),
f"""
SELECT
a{op}b AS d, a{op}1.0 AS e, 1.0{op}b AS f,
1.0{op}2.0 AS g, 1.0{op}c AS h, a{op}c AS i
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[1.0, 2.0, 3.0, 4.0],
b=[2.0, 2.0, 0.1, 2.0],
c=[1.0, None, 1.0, float("nan")],
)
)
test_(pdf, "+")
test_(pdf, "-")
test_(pdf, "*")
test_(pdf, "/")
# Integer division and dividing by 0 do not have consistent behaviors
# on different SQL engines. So we can't unify.
# SELECT 1.0/0.0 AS x, 1/2 AS y
def test_comparison_op_num(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, 2.0, op)
df["f"] = self.utils.comparison_op(2.0, df.b, op)
df["g"] = self.utils.comparison_op(2.0, 3.0, op)
df["h"] = self.utils.comparison_op(2.0, df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}2.0 AS e, 2.0{op}b AS f,
2.0{op}3.0 AS g, 2.0{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
assert self.utils.comparison_op(None, None, op) is None
pdf = pd.DataFrame(
dict(
a=[1.0, 2.0, 3.0, 4.0],
b=[2.0, 2.0, 0.1, 2.0],
c=[2.0, None, 2.0, float("nan")],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_comparison_op_str(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, "y", op)
df["f"] = self.utils.comparison_op("y", df.b, op)
df["g"] = self.utils.comparison_op("y", "z", op)
df["h"] = self.utils.comparison_op("y", df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}'y' AS e, 'y'{op}b AS f,
'y'{op}'z' AS g, 'y'{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=["xx", None, "x"],
b=[None, "t", "tt"],
c=["zz", None, "z"],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_comparison_op_time(self):
t = datetime(2019, 1, 1)
x = datetime(2020, 1, 1)
y = datetime(2020, 1, 2)
z = datetime(2020, 1, 3)
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.comparison_op(df.a, df.b, op)
df["e"] = self.utils.comparison_op(df.a, y, op)
df["f"] = self.utils.comparison_op(y, df.b, op)
df["g"] = self.utils.comparison_op(y, z, op)
df["h"] = self.utils.comparison_op(y, df.c, op)
df["i"] = self.utils.comparison_op(df.a, df.c, op)
df["j"] = self.utils.comparison_op(df.c, df.c, op)
assert_duck_eq(
self.to_pd(df[list("defghij")]),
f"""
SELECT
a{op}b AS d, a{op}'{y}' AS e, '{y}'{op}b AS f,
'{y}'{op}'{z}' AS g, '{y}'{op}c AS h, a{op}c AS i,
c{op}c AS j
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[x, None, x],
b=[None, t, t],
c=[z, z, None],
)
)
test_(pdf, "<")
test_(pdf, "<=")
test_(pdf, "==")
test_(pdf, "!=")
test_(pdf, ">")
test_(pdf, ">=")
def test_binary_logical_op(self):
def test_(pdf: pd.DataFrame, op: str):
df = self.to_df(pdf)
df["d"] = self.utils.binary_logical_op(df.a, df.b, op)
df["e"] = self.utils.binary_logical_op(df.a, True, op)
df["f"] = self.utils.binary_logical_op(True, df.b, op)
df["g"] = self.utils.binary_logical_op(df.a, False, op)
df["h"] = self.utils.binary_logical_op(False, df.b, op)
df["i"] = self.utils.binary_logical_op(True, False, op)
df["j"] = self.utils.binary_logical_op(True, None, op)
df["k"] = self.utils.binary_logical_op(False, None, op)
df["l"] = self.utils.binary_logical_op(None, None, op)
assert_duck_eq(
self.to_pd(df[list("defghijkl")]),
f"""
SELECT
a {op} b AS d, a {op} TRUE AS e, TRUE {op} b AS f,
a {op} FALSE AS g, FALSE {op} b AS h, TRUE {op} FALSE AS i,
TRUE {op} NULL AS j, FALSE {op} NULL AS k, NULL {op} NULL AS l
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[True, False, True, False, True, False, None],
b=[False, True, True, False, None, None, None],
)
)
test_(pdf, "and")
test_(pdf, "or")
def test_logical_not(self):
def test_(pdf: pd.DataFrame):
df = self.to_df(pdf)
df["c"] = self.utils.logical_not(df.a)
df["e"] = self.utils.logical_not(True)
df["f"] = self.utils.logical_not(False)
df["g"] = self.utils.logical_not(None)
assert_duck_eq(
self.to_pd(df[list("cefg")]),
"""
SELECT
NOT a AS c, NOT TRUE AS e,
NOT FALSE AS f, NOT NULL AS g
FROM pdf
""",
pdf=pdf,
check_order=False,
)
pdf = pd.DataFrame(dict(a=[True, False, None]))
test_(pdf)
def test_filter_df(self):
def test_(pdf: pd.DataFrame):
df = self.to_df(pdf)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, df["a"])),
"""
SELECT * FROM pdf WHERE a
""",
pdf=pdf,
check_order=False,
)
test_(pd.DataFrame(dict(a=[True, False], b=[1.0, 2.0])))
test_(pd.DataFrame(dict(a=[False, False], b=[1.0, 2.0])))
test_(pd.DataFrame(dict(a=[1.0, 0.0, None], b=[1.0, 2.0, 3.0])))
test_(pd.DataFrame(dict(a=[float("nan"), 0.0, None], b=[1.0, 2.0, 3.0])))
pdf = pd.DataFrame([[1], [2]], columns=["a"])
df = self.to_df(pdf)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, True)),
"""
SELECT * FROM pdf WHERE TRUE
""",
pdf=pdf,
check_order=False,
)
assert_duck_eq(
self.to_pd(self.utils.filter_df(df, False)),
"""
SELECT * FROM pdf WHERE FALSE
""",
pdf=pdf,
check_order=False,
)
def test_is_value(self):
assert self.utils.is_value(None, None, True)
assert not self.utils.is_value(None, None, False)
assert not self.utils.is_value(None, True, True)
assert self.utils.is_value(None, True, False)
assert not self.utils.is_value(None, False, True)
assert self.utils.is_value(None, False, False)
assert self.utils.is_value(float("nan"), None, True)
assert not self.utils.is_value(float("nan"), None, False)
assert self.utils.is_value(pd.NaT, None, True)
assert not self.utils.is_value(pd.NaT, None, False)
assert not self.utils.is_value("abc", None, True)
assert self.utils.is_value("abc", None, False)
assert not self.utils.is_value(True, None, True)
assert self.utils.is_value(True, None, False)
assert self.utils.is_value(True, True, True)
assert not self.utils.is_value(True, True, False)
assert not self.utils.is_value(True, False, True)
assert self.utils.is_value(True, False, False)
assert not self.utils.is_value(-1.1, None, True)
assert self.utils.is_value(-1.1, None, False)
assert self.utils.is_value(-1.1, True, True)
assert not self.utils.is_value(-1.1, True, False)
assert not self.utils.is_value(-1.1, False, True)
assert self.utils.is_value(-1.1, False, False)
assert not self.utils.is_value(False, None, True)
assert self.utils.is_value(False, None, False)
assert not self.utils.is_value(False, True, True)
assert self.utils.is_value(False, True, False)
assert self.utils.is_value(False, False, True)
assert not self.utils.is_value(False, False, False)
assert not self.utils.is_value(0, None, True)
assert self.utils.is_value(0, None, False)
assert not self.utils.is_value(0, True, True)
assert self.utils.is_value(0, True, False)
assert self.utils.is_value(0, False, True)
assert not self.utils.is_value(0, False, False)
with raises(NotImplementedError):
self.utils.is_value(0, "x", False)
pdf = pd.DataFrame(dict(a=[True, False, None]))
df = self.to_df(pdf)
df["h"] = self.utils.is_value(df["a"], None, True)
df["i"] = self.utils.is_value(df["a"], None, False)
df["j"] = self.utils.is_value(df["a"], True, True)
df["k"] = self.utils.is_value(df["a"], True, False)
df["l"] = self.utils.is_value(df["a"], False, True)
df["m"] = self.utils.is_value(df["a"], False, False)
assert_pdf_eq(
self.to_pd(df[list("hijklm")]),
pd.DataFrame(
dict(
h=[False, False, True],
i=[True, True, False],
j=[True, False, False],
k=[False, True, True],
l=[False, True, False],
m=[True, False, True],
),
),
check_order=False,
)
def test_is_in(self):
assert self.utils.is_in(None, [None, 1], True) is None
assert self.utils.is_in(None, [None, 1], False) is None
assert self.utils.is_in(None, ["a", "b"], True) is None
assert self.utils.is_in(None, ["a", "b"], False) is None
assert self.utils.is_in(True, [False, True], True)
assert not self.utils.is_in(True, [False, True], False)
assert self.utils.is_in(False, [None, False], True)
assert not self.utils.is_in(False, [None, False], False)
assert self.utils.is_in(True, [None, False], True) is None
assert self.utils.is_in(True, [None, False], False) is None
assert self.utils.is_in(1, [2, 1], True)
assert not self.utils.is_in(1, [2, 1], False)
assert self.utils.is_in(1, [None, 1], True)
assert not self.utils.is_in(1, [None, 1], False)
assert self.utils.is_in(1, [None, 2], True) is None
assert self.utils.is_in(1, [None, 2], False) is None
assert self.utils.is_in(1.1, [2.2, 1.1], True)
assert not self.utils.is_in(1.1, [2.2, 1.1], False)
assert self.utils.is_in(1.1, [None, 1.1], True)
assert not self.utils.is_in(1.1, [None, 1.1], False)
assert self.utils.is_in(1.1, [None, 2.2], True) is None
assert self.utils.is_in(1.1, [None, 2.2], False) is None
assert self.utils.is_in("aa", ["bb", "aa"], True)
assert not self.utils.is_in("aa", ["bb", "aa"], False)
assert self.utils.is_in("aa", [None, "aa"], True)
assert not self.utils.is_in("aa", [None, "aa"], False)
assert self.utils.is_in("aa", [None, "bb"], True) is None
assert self.utils.is_in("aa", [None, "b"], False) is None
assert self.utils.is_in(
date(2020, 1, 1), [date(2020, 1, 2), date(2020, 1, 1)], True
)
assert not self.utils.is_in(
date(2020, 1, 1), [date(2020, 1, 2), date(2020, 1, 1)], False
)
assert self.utils.is_in(date(2020, 1, 1), [pd.NaT, date(2020, 1, 1)], True)
assert not self.utils.is_in(
date(2020, 1, 1), [None, date(2020, 1, 1)], False
)
assert (
self.utils.is_in(date(2020, 1, 1), [pd.NaT, date(2020, 1, 2)], True)
is None
)
assert (
self.utils.is_in(date(2020, 1, 1), [None, date(2020, 1, 2)], False)
is None
)
def test_is_in_sql(self):
pdf = pd.DataFrame(
dict(
a=[True, False, None],
b=[1, 2, None],
c=[1.1, 2.2, None],
d=["aa", "bb", None],
e=[date(2020, 1, 1), date(2020, 1, 2), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.is_in(df["a"], [False, None], True)
df["i"] = self.utils.is_in(df["a"], [False, None], False)
df["j"] = self.utils.is_in(df["b"], [1, 3, None], True)
df["k"] = self.utils.is_in(df["b"], [1, 3, None], False)
df["l"] = self.utils.is_in(df["c"], [1.1, 3.3, None], True)
df["m"] = self.utils.is_in(df["c"], [1.1, 3.3, None], False)
df["n"] = self.utils.is_in(df["d"], ["aa", "cc", None], True)
df["o"] = self.utils.is_in(df["d"], ["aa", "cc", None], False)
df["p"] = self.utils.is_in(
df["e"], [date(2020, 1, 1), date(2020, 1, 3), None], True
)
df["q"] = self.utils.is_in(
df["e"], [date(2020, 1, 1), date(2020, 1, 3), None], False
)
assert_duck_eq(
self.to_pd(df[list("jklmnopq")]),
"""
SELECT
-- a IN (FALSE, NULL) AS h,
-- a NOT IN (FALSE, NULL) AS i,
b IN (3, 1, NULL) AS j,
b NOT IN (3, 1, NULL) AS k,
c IN (3.3, 1.1, NULL) AS l,
c NOT IN (3.3, 1.1, NULL) AS m,
d IN ('cc', 'aa', NULL) AS n,
d NOT IN ('cc', 'aa', NULL) AS o,
e IN ('2020-01-03', '2020-01-01', NULL) AS p,
e NOT IN ('2020-01-03', '2020-01-01', NULL) AS q
FROM a
""",
a=pdf,
check_order=False,
)
pdf = pd.DataFrame(
dict(
a=[1.1, 2.2, None],
b=[1.1, None, None],
c=[None, 2.2, None],
d=[3.3, None, None],
e=[None, 4.4, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.is_in(df["a"], [df["b"], df["c"]], True)
df["i"] = self.utils.is_in(df["a"], [df["b"], df["c"]], False)
df["j"] = self.utils.is_in(df["a"], [df["d"], df["e"]], True)
df["k"] = self.utils.is_in(df["a"], [df["d"], df["e"]], False)
df["l"] = self.utils.is_in(df["a"], [df["b"], df["d"], None], True)
df["m"] = self.utils.is_in(df["a"], [df["b"], df["d"], None], False)
assert_duck_eq(
self.to_pd(df[list("hijklm")]),
"""
SELECT
a IN (b, c) AS h,
a NOT IN (b, c) AS i,
a IN (d, e) AS j,
a NOT IN (d, e) AS k,
a IN (b, d, NULL) AS l,
a NOT IN (b, d, NULL) AS m
FROM a
""",
a=pdf,
check_order=False,
)
def test_is_between(self):
# if col is null, then the result is null
for a in [1, 2, None]:
for b in [1, 2, None]:
for p in [True, False]:
assert self.utils.is_between(None, a, b, p) is None
# one side is none and the result can't be determined, so null
assert self.utils.is_between(2, None, 2, True) is None
assert self.utils.is_between(2, None, 2, False) is None
assert self.utils.is_between(3, 2, None, True) is None
assert self.utils.is_between(3, 2, None, False) is None
# one side is none but the result is still deterministic
assert not self.utils.is_between(3, None, 2, True)
assert self.utils.is_between(3, None, 2, False)
assert not self.utils.is_between(1, 2, None, True)
assert self.utils.is_between(1, 2, None, False)
# if lower and upper are both nulls, the result is null
assert self.utils.is_between(3, None, None, True) is None
assert self.utils.is_between(3, None, None, False) is None
# happy paths
assert self.utils.is_between(1, 1, 2, True)
assert not self.utils.is_between(2, 1, 2, False)
assert not self.utils.is_between(0, 1, 2, True)
assert self.utils.is_between(0, 1, 2, False)
assert not self.utils.is_between(3, 1, 2, True)
assert self.utils.is_between(3, 1, 2, False)
assert self.utils.is_between("bb", "bb", "cc", True)
assert not self.utils.is_between("cc", "bb", "cc", False)
assert not self.utils.is_between("aa", "bb", "cc", True)
assert self.utils.is_between("aa", "bb", "cc", False)
assert self.utils.is_between(
date(2020, 1, 2), date(2020, 1, 2), date(2020, 1, 3), True
)
assert not self.utils.is_between(
date(2020, 1, 3), date(2020, 1, 2), date(2020, 1, 3), False
)
assert not self.utils.is_between(
date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3), True
)
assert self.utils.is_between(
date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3), False
)
def test_is_between_sql(self):
pdf = make_rand_df(100, a=(float, 20), b=(float, 20), c=(float, 20))
# pdf = make_rand_df(5, a=(float, 2), b=(float, 2), c=(float, 2))
print(pdf)
df = self.to_df(pdf)
df["h"] = self.utils.is_between(df["a"], df["b"], df["c"], True)
df["i"] = self.utils.is_between(df["a"], df["b"], df["c"], False)
df["j"] = self.utils.is_between(None, df["b"], df["c"], True)
df["k"] = self.utils.is_between(None, df["b"], df["c"], False)
df["l"] = self.utils.is_between(df["a"], df["b"], None, True)
df["m"] = self.utils.is_between(df["a"], df["b"], None, False)
df["n"] = self.utils.is_between(df["a"], None, df["c"], True)
df["o"] = self.utils.is_between(df["a"], None, df["c"], False)
df["p"] = self.utils.is_between(df["a"], 0.5, df["c"], True)
df["q"] = self.utils.is_between(df["a"], 0.5, df["c"], False)
df["r"] = self.utils.is_between(df["a"], df["b"], 0.5, True)
df["s"] = self.utils.is_between(df["a"], df["b"], 0.5, False)
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrs")]),
"""
SELECT
a BETWEEN b AND c AS h,
a NOT BETWEEN b AND c AS i,
NULL BETWEEN b AND c AS j,
NULL NOT BETWEEN b AND c AS k,
a BETWEEN b AND NULL AS l,
a NOT BETWEEN b AND NULL AS m,
a BETWEEN NULL AND c AS n,
a NOT BETWEEN NULL AND c AS o,
a BETWEEN 0.5 AND c AS p,
a NOT BETWEEN 0.5 AND c AS q,
a BETWEEN b AND 0.5 AS r,
a NOT BETWEEN b AND 0.5 AS s
FROM a
""",
a=pdf,
check_order=False,
)
def test_cast_coalesce_sql(self):
pdf = make_rand_df(100, a=(float, 50), b=(float, 50), c=(float, 50))
df = self.to_df(pdf)
df["g"] = self.utils.coalesce([None])
df["h"] = self.utils.coalesce([None, 10.1, None])
df["i"] = self.utils.coalesce([df["a"], 10.1])
df["j"] = self.utils.coalesce([10.1, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], 10.1])
assert_duck_eq(
self.to_pd(df[list("ghijklmn")]),
"""
SELECT
COALESCE(NULL) AS g,
COALESCE(NULL, 10.1, NULL) AS h,
COALESCE(a, 10.1) AS i,
COALESCE(10.1, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,10.1) AS n
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(bool, 50), b=(bool, 50), c=(bool, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, False, None])
df["i"] = self.utils.coalesce([df["a"], False])
df["j"] = self.utils.coalesce([False, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], False])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, FALSE) AS h,
COALESCE(a, FALSE) AS i,
COALESCE(FALSE, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,FALSE) AS n
FROM (SELECT
CAST(a AS BOOLEAN) a,
CAST(b AS BOOLEAN) b,
CAST(c AS BOOLEAN) c FROM a)
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(int, 50), b=(int, 50), c=(int, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, 10, None])
df["i"] = self.utils.coalesce([df["a"], 10])
df["j"] = self.utils.coalesce([10, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], 10])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, 10) AS h,
COALESCE(a, 10) AS i,
COALESCE(10, a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,10) AS n
FROM (SELECT
CAST(a AS INTEGER) a,
CAST(b AS INTEGER) b,
CAST(c AS INTEGER) c FROM a)
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(100, a=(str, 50), b=(str, 50), c=(str, 50))
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, "xx", None])
df["i"] = self.utils.coalesce([df["a"], "xx"])
df["j"] = self.utils.coalesce(["xx", df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], "xx"])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, 'xx') AS h,
COALESCE(a, 'xx') AS i,
COALESCE('xx', a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,'xx') AS n
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
100, a=(datetime, 50), b=(datetime, 50), c=(datetime, 50)
)
ct = datetime(2020, 1, 1, 15)
df = self.to_df(pdf)
df["h"] = self.utils.coalesce([None, ct, None])
df["i"] = self.utils.coalesce([df["a"], ct])
df["j"] = self.utils.coalesce([ct, df["a"]])
df["k"] = self.utils.coalesce([df["a"], None])
df["l"] = self.utils.coalesce([None, df["a"]])
df["m"] = self.utils.coalesce([df["a"], df["b"], df["c"]])
df["n"] = self.utils.coalesce([df["a"], df["b"], df["c"], ct])
assert_duck_eq(
self.to_pd(df[list("hijklmn")]),
"""
SELECT
COALESCE(NULL, TIMESTAMP '2020-01-01 15:00:00') AS h,
COALESCE(a, TIMESTAMP '2020-01-01 15:00:00') AS i,
COALESCE(TIMESTAMP '2020-01-01 15:00:00', a) AS j,
COALESCE(a, NULL) AS k,
COALESCE(NULL, a) AS l,
COALESCE(a,b,c) AS m,
COALESCE(a,b,c,TIMESTAMP '2020-01-01 15:00:00') AS n
FROM a
""",
a=pdf,
check_order=False,
)
def test_case_when(self):
assert 4 == self.utils.case_when(default=4)
assert 3 == self.utils.case_when((False, 1), (2, 3), default=4)
assert 3 == self.utils.case_when((None, 1), (2, 3), default=4)
assert 1 == self.utils.case_when((True, 1), (2, 3), default=4)
assert 4 == self.utils.case_when((False, 1), (False, 3), default=4)
def test_case_when_sql(self):
pdf = make_rand_df(20, a=bool, b=str, c=bool, d=(str, 10), e=(str, 10))
df = self.to_df(pdf)
df["h"] = self.utils.case_when((df["a"], df["b"]), (df["c"], df["d"]))
df["i"] = self.utils.case_when(
(df["a"], df["b"]), (df["c"], df["d"]), default=df["e"]
)
assert_duck_eq(
self.to_pd(df[list("hi")]),
"""
SELECT
CASE WHEN a THEN b WHEN c THEN d END AS h,
CASE WHEN a THEN b WHEN c THEN d ELSE e END AS i
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
20, a=(bool, 10), b=(str, 10), c=(bool, 10), d=(str, 10), e=(str, 10)
)
df = self.to_df(pdf)
df["h"] = self.utils.case_when((df["a"], df["b"]), (df["c"], df["d"]))
df["i"] = self.utils.case_when(
(df["a"], df["b"]), (df["c"], df["d"]), default=df["e"]
)
assert_duck_eq(
self.to_pd(df[list("hi")]),
"""
SELECT
CASE WHEN a THEN b WHEN c THEN d END AS h,
CASE WHEN a THEN b WHEN c THEN d ELSE e END AS i
FROM a
""",
a=pdf,
check_order=False,
)
pdf = make_rand_df(
20,
a=(float, 10),
b=(float, 10),
c=(float, 10),
d=(float, 10),
e=(float, 10),
)
df = self.to_df(pdf)
df["h"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]), ((df["c"] > 0.5) | (df["a"] > 0.3), df["d"])
)
df["i"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]),
((df["c"] > 0.5) | (df["a"] > 0.3), df["d"]),
default=df["e"],
)
df["j"] = self.utils.case_when(
(df["a"] > 0.5, df["b"]),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["k"] = self.utils.case_when(
(None, df["b"]),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["l"] = self.utils.case_when(
(True, 2),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
df["m"] = self.utils.case_when(
(True, None),
(df["a"] > 0.5, df["d"]),
default=df["e"],
)
assert_duck_eq(
self.to_pd(df[list("hijklm")]),
"""
SELECT
CASE
WHEN a>0.5 THEN b
WHEN c>0.5 OR a>0.3 THEN d END AS h,
CASE
WHEN a>0.5 THEN b
WHEN c>0.5 OR a>0.3 THEN d
ELSE e END AS i,
CASE
WHEN a>0.5 THEN b
WHEN a>0.5 THEN d
ELSE e END AS j,
CASE
WHEN NULL THEN b
WHEN a>0.5 THEN d
ELSE e END AS k,
CASE
WHEN TRUE THEN 2
WHEN a>0.5 THEN d
ELSE e END AS l,
CASE
WHEN TRUE THEN NULL
WHEN a>0.5 THEN d
ELSE e END AS m
FROM a
""",
a=pdf,
check_order=False,
)
def test_like(self):
# nulls
for p in [True, False]:
for i in [True, False]:
assert (
self.utils.like(None, None, ignore_case=i, positive=p) is None
)
assert self.utils.like("x", None, ignore_case=i, positive=p) is None
# empty
assert self.utils.like("", "")
assert not self.utils.like("abc", "")
# simple
assert not self.utils.like("abc", "aBc")
assert self.utils.like("abc", "aBc", ignore_case=True)
# start
assert not self.utils.like("abc", "aB%")
assert not self.utils.like("abc", "aB_")
assert self.utils.like("abc", "aB%", ignore_case=True)
assert self.utils.like("abc", "aB_", ignore_case=True)
# end
assert not self.utils.like("abc", "%Bc")
assert not self.utils.like("abc", "_Bc")
assert self.utils.like("abc", "%Bc", ignore_case=True)
assert self.utils.like("abc", "_Bc", ignore_case=True)
# start end
assert not self.utils.like("abc", "A_c")
assert not self.utils.like("abc", "A%c")
assert self.utils.like("abc", "A_c", ignore_case=True)
assert self.utils.like("abc", "A%c", ignore_case=True)
# contain
assert not self.utils.like("abc", "%B%")
assert not self.utils.like("abc", "_B_")
assert self.utils.like("abc", "%B%", ignore_case=True)
assert self.utils.like("abc", "_B_", ignore_case=True)
# not empty
assert self.utils.like("abc", "_%")
assert self.utils.like("abc", "%_")
assert self.utils.like("abc", "%_%")
# any
assert self.utils.like("abc", "%")
def test_like_sql(self):
pdf = pd.DataFrame(
dict(a=["abc", "ABC", "abd", "aBd", "", "ab\\%\\_c", None])
)
df = self.to_df(pdf)
df["h"] = self.utils.like(df["a"], None)
df["i"] = self.utils.like(df["a"], "")
df["j"] = self.utils.like(df["a"], "abc", ignore_case=True)
df["k"] = self.utils.like(df["a"], "aBc", ignore_case=False)
df["l"] = self.utils.like(df["a"], "ab%", ignore_case=True)
df["m"] = self.utils.like(df["a"], "aB%", ignore_case=False)
df["n"] = self.utils.like(df["a"], "%bc", ignore_case=True)
df["o"] = self.utils.like(df["a"], "%bc", ignore_case=False)
df["p"] = self.utils.like(df["a"], "a%c", ignore_case=True)
df["q"] = self.utils.like(df["a"], "a%c", ignore_case=False)
df["r"] = self.utils.like(df["a"], "%bc%", ignore_case=True)
df["s"] = self.utils.like(df["a"], "%bc%", ignore_case=False)
df["t"] = self.utils.like(df["a"], "%_")
df["u"] = self.utils.like(df["a"], "_%")
df["v"] = self.utils.like(df["a"], "%_%")
df["w"] = self.utils.like(df["a"], "_a%", ignore_case=True)
df["x"] = self.utils.like(df["a"], "_a%", ignore_case=False)
df["y"] = self.utils.like(df["a"], "%")
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrstuvwxy")]),
"""
SELECT
a LIKE NULL AS h,
a LIKE '' AS i,
a ILIKE 'abc' AS j,
a LIKE 'aBc' AS k,
a ILIKE 'ab%' AS l,
a LIKE 'aB%' AS m,
a ILIKE '%bc' AS n,
a LIKE '%bc' AS o,
a ILIKE 'a%c' AS p,
a LIKE 'a%c' AS q,
a ILIKE '%bc%' AS r,
a LIKE '%bc%' AS s,
a LIKE '%_' AS t,
a LIKE '_%' AS u,
a LIKE '%_%' AS v,
a ILIKE '_a%' AS w,
a LIKE '_a%' AS x,
a LIKE '%' AS y
FROM a
""",
a=pdf,
check_order=False,
)
df = self.to_df(pdf)
df["h"] = self.utils.like(df["a"], None, positive=False)
df["i"] = self.utils.like(df["a"], "", positive=False)
df["j"] = self.utils.like(df["a"], "abc", ignore_case=True, positive=False)
df["k"] = self.utils.like(df["a"], "aBc", ignore_case=False, positive=False)
df["l"] = self.utils.like(df["a"], "ab%", ignore_case=True, positive=False)
df["m"] = self.utils.like(df["a"], "aB%", ignore_case=False, positive=False)
df["n"] = self.utils.like(df["a"], "%bc", ignore_case=True, positive=False)
df["o"] = self.utils.like(df["a"], "%bc", ignore_case=False, positive=False)
df["p"] = self.utils.like(df["a"], "a%c", ignore_case=True, positive=False)
df["q"] = self.utils.like(df["a"], "a%c", ignore_case=False, positive=False)
df["r"] = self.utils.like(df["a"], "%bc%", ignore_case=True, positive=False)
df["s"] = self.utils.like(
df["a"], "%bc%", ignore_case=False, positive=False
)
df["t"] = self.utils.like(df["a"], "%_", positive=False)
df["u"] = self.utils.like(df["a"], "_%", positive=False)
df["v"] = self.utils.like(df["a"], "%_%", positive=False)
df["w"] = self.utils.like(df["a"], "_a%", ignore_case=True, positive=False)
df["x"] = self.utils.like(df["a"], "_a%", ignore_case=False, positive=False)
df["y"] = self.utils.like(df["a"], "%", positive=False)
assert_duck_eq(
self.to_pd(df[list("hijklmnopqrstuvwxy")]),
"""
SELECT
a NOT LIKE NULL AS h,
a NOT LIKE '' AS i,
a NOT ILIKE 'abc' AS j,
a NOT LIKE 'aBc' AS k,
a NOT ILIKE 'ab%' AS l,
a NOT LIKE 'aB%' AS m,
a NOT ILIKE '%bc' AS n,
a NOT LIKE '%bc' AS o,
a NOT ILIKE 'a%c' AS p,
a NOT LIKE 'a%c' AS q,
a NOT ILIKE '%bc%' AS r,
a NOT LIKE '%bc%' AS s,
a NOT LIKE '%_' AS t,
a NOT LIKE '_%' AS u,
a NOT LIKE '%_%' AS v,
a NOT ILIKE '_a%' AS w,
a NOT LIKE '_a%' AS x,
a NOT LIKE '%' AS y
FROM a
""",
a=pdf,
check_order=False,
)
def test_cast_constant(self):
assert self.utils.cast(None, bool) is None
assert self.utils.cast(True, bool)
assert not self.utils.cast(False, bool)
assert self.utils.cast(float("nan"), bool) is None
assert not self.utils.cast(0, bool)
assert 1 == self.utils.cast(1, bool)
assert 1 == self.utils.cast(-2, bool)
assert 0 == self.utils.cast(0.0, bool)
assert 1 == self.utils.cast(0.1, bool)
assert 1 == self.utils.cast(-0.2, bool)
assert 1 == self.utils.cast(float("inf"), bool)
assert 1 == self.utils.cast(float("-inf"), bool)
assert self.utils.cast("nan", bool) is None
assert 1 == self.utils.cast("tRue", bool)
assert 0 == self.utils.cast("fAlse", bool)
assert self.utils.cast(None, int) is None
assert 1 == self.utils.cast(True, int)
assert 0 == self.utils.cast(False, int)
assert self.utils.cast(float("nan"), int) is None
assert 0 == self.utils.cast(0, int)
assert 10 == self.utils.cast(10, int)
assert 0 == self.utils.cast(0.0, int)
assert 1 == self.utils.cast(1.1, int)
assert -2 == self.utils.cast(-2.2, int)
assert 0 == self.utils.cast("0", int)
assert 10 == self.utils.cast("10", int)
assert 0 == self.utils.cast("0.0", int)
assert 1 == self.utils.cast("1.1", int)
assert -2 == self.utils.cast("-2.2", int)
assert self.utils.cast("nan", int) is None
with raises(SlideCastError):
assert self.utils.cast(float("inf"), int)
with raises(SlideCastError):
assert self.utils.cast(float("-inf"), int)
assert self.utils.cast(None, float) is None
assert 1.0 == self.utils.cast(True, float)
assert 0.0 == self.utils.cast(False, float)
assert self.utils.cast(float("nan"), float) is None
assert 0.0 == self.utils.cast(0, float)
assert 10.0 == self.utils.cast(10, float)
assert 0.0 == self.utils.cast(0.0, float)
assert 1.1 == self.utils.cast(1.1, float)
assert -2.2 == self.utils.cast(-2.2, float)
assert 0.0 == self.utils.cast("0", float)
assert 10.0 == self.utils.cast("10", float)
assert 0.0 == self.utils.cast("0.0", float)
assert 1.1 == self.utils.cast("1.1", float)
assert -2.2 == self.utils.cast("-2.2", float)
assert self.utils.cast("nan", float) is None
assert np.isinf(self.utils.cast(float("inf"), float))
assert np.isinf(self.utils.cast(float("-inf"), float))
assert self.utils.cast(None, str) is None
assert "true" == self.utils.cast(True, str)
assert "false" == self.utils.cast(False, str)
assert "true" == self.utils.cast(-10, str, bool)
assert "false" == self.utils.cast(0, str, bool)
assert "10" == self.utils.cast(10, str)
assert "0" == self.utils.cast(0, str)
assert "10.0" == self.utils.cast(10.0, str)
assert "-10.0" == self.utils.cast(-10.0, str)
assert self.utils.cast(float("nan"), str) is None
assert "inf" == self.utils.cast(float("inf"), str)
assert "-inf" == self.utils.cast(float("-inf"), str)
assert "xy" == self.utils.cast("xy", str)
assert isinstance(self.utils.cast(date(2020, 1, 1), str), str)
assert "2020-01-01" == self.utils.cast(date(2020, 1, 1), str)
assert "2020-01-01 15:00:00" == self.utils.cast(
datetime(2020, 1, 1, 15), str
)
assert self.utils.cast(pd.NaT, str) is None
assert self.utils.cast(None, "date") is None
assert self.utils.cast(None, "datetime") is None
assert self.utils.cast("nat", "date") is None
assert self.utils.cast("nat", "datetime") is None
assert date(2020, 1, 1) == self.utils.cast("2020-01-01", "date")
assert date(2020, 1, 1) == self.utils.cast("2020-01-01 15:00:00", "date")
assert datetime(2020, 1, 1) == self.utils.cast("2020-01-01", "datetime")
assert datetime(2020, 1, 1, 15) == self.utils.cast(
"2020-01-01 15:00:00", "datetime"
)
def test_cast_bool(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
df["i"] = self.utils.cast(df.a, float)
df["j"] = self.utils.cast(df.a, bool)
df["k"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[1.0, 0.0, 1.0],
j=[True, False, True],
k=["true", "false", "true"],
),
),
check_order=False,
)
# from bool with None
pdf = pd.DataFrame(
dict(
a=[True, False, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int, bool)
df["i"] = self.utils.cast(df.a, float)
df["j"] = self.utils.cast(df.a, bool, bool)
df["k"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, None],
i=[1.0, 0.0, None],
j=[True, False, None],
k=["true", "false", None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.0, 0.0, -2.0, None, float("nan")],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[True, False, True, None, None],
),
),
check_order=False,
)
# from int
pdf = pd.DataFrame(
dict(
a=[2, 0, -2],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[True, False, True],
),
),
check_order=False,
)
# from bool with None to various
pdf = pd.DataFrame(
dict(
a=[1.0, 0.0, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int, bool)
df["i"] = self.utils.cast(df.a, float, bool)
df["j"] = self.utils.cast(df.a, bool, bool)
df["k"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[1, 0, None],
i=[1.0, 0.0, None],
j=[True, False, None],
k=["true", "false", None],
),
),
check_order=False,
)
# from strings
pdf = pd.DataFrame(
dict(
a=["tRue", "fAlse", "true"],
b=["tRue", "fAlse", None],
c=["1", "0", "abc"],
d=["1.0", "0.0", "abc"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, bool, str)
df["i"] = self.utils.cast(df.b, bool, str)
df["j"] = self.utils.cast(df.c, bool, str)
df["k"] = self.utils.cast(df.d, bool, str)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[True, False, True],
i=[True, False, None],
j=[True, False, None],
k=[True, False, None],
),
),
check_order=False,
)
# invalid
pdf = pd.DataFrame(
dict(
a=[datetime(2020, 1, 1)],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
df["h"] = self.utils.cast(df.a, bool)
def test_cast_int(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=["1", "2", "3"],
e=["5.5", "6.6", "7.7"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
df["i"] = self.utils.cast(df.b, int)
df["j"] = self.utils.cast(df.c, int)
df["k"] = self.utils.cast(df.d, int)
df["l"] = self.utils.cast(df.e, int)
assert_pdf_eq(
self.to_pd(df[list("hijkl")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[2, 3, 4],
j=[1, 2, 3],
k=[1, 2, 3],
l=[5, 6, 7],
),
),
check_order=False,
)
# from int with None
pdf = pd.DataFrame(
dict(
a=[2, 3, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, 3, None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, None, None],
),
),
check_order=False,
)
# from string with None
pdf = pd.DataFrame(
dict(
a=["2.1", "naN", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2, None, None],
),
),
check_order=False,
)
# overflow, TODO: pandas can't raise exception
pdf = pd.DataFrame(
dict(
a=[10000, -10000],
)
)
# df = self.to_df(pdf)
# with raises(SlideCastError):
# df["h"] = self.utils.cast(df.a, "int8")
# invalid
pdf = pd.DataFrame(
dict(
a=[datetime(2020, 1, 1)],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
self.utils.series_to_array(self.utils.cast(df.a, int))
def test_cast_int_overflow(self):
pdf = pd.DataFrame(
dict(
a=[2.1, float("inf"), None],
)
)
df = self.to_df(pdf)
with raises(SlideCastError):
self.utils.series_to_array(self.utils.cast(df.a, int))
def test_cast_float(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[True, False, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=[2.0, 0.0, -1.0],
e=["5.5", "6.6", "7.7"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, float)
df["i"] = self.utils.cast(df.b, float)
df["j"] = self.utils.cast(df.c, float)
df["l"] = self.utils.cast(df.e, float)
assert_pdf_eq(
self.to_pd(df[list("hijl")]),
pd.DataFrame(
dict(
h=[1, 0, 1],
i=[2, 3, 4],
j=[1.1, 2.2, 3.3],
l=[5.5, 6.6, 7.7],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), float("inf"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "float32")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2.1, float("nan"), float("inf"), None],
),
).astype(np.float32),
check_order=False,
)
# from string with None
pdf = pd.DataFrame(
dict(
a=["2.1", "naN", "inf", "-inf", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, float)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[2.1, None, float("inf"), float("-inf"), None],
),
),
check_order=False,
)
def test_cast_str(self):
# happy path
pdf = pd.DataFrame(
dict(
a=[False, True, True],
b=[2, 3, 4],
c=[1.1, 2.2, 3.3],
d=[
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
],
e=["aa", "ab", "ac"],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
df["i"] = self.utils.cast(df.b, str)
df["j"] = self.utils.cast(df.c, str)
df["k"] = self.utils.cast(df.d, str)
df["l"] = self.utils.cast(df.e, str)
assert_pdf_eq(
self.to_pd(df[list("hijkl")]),
pd.DataFrame(
dict(
h=["false", "true", "true"],
i=["2", "3", "4"],
j=["1.1", "2.2", "3.3"],
k=["2020-01-02", "2020-01-03", "2020-01-04"],
l=["aa", "ab", "ac"],
),
),
check_order=False,
)
# from bool with None
pdf = pd.DataFrame(
dict(
a=[True, False, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, bool)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["true", "false", None],
),
),
check_order=False,
)
# from float with None
pdf = pd.DataFrame(
dict(
a=[2.1, float("nan"), float("inf"), None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2.1", None, "inf", None],
),
),
check_order=False,
)
# from int with None
pdf = pd.DataFrame(
dict(
a=[1, None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, int)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["1", None],
),
),
check_order=False,
)
# from timestamp with None
pdf = pd.DataFrame(
dict(
a=[
datetime(2020, 1, 1),
datetime(2020, 1, 1, 15, 2, 3),
pd.NaT,
None,
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str)
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2020-01-01 00:00:00", "2020-01-01 15:02:03", None, None],
),
),
check_order=False,
)
# from date with None
pdf = pd.DataFrame(
dict(
a=[
date(2020, 1, 1),
date(2020, 1, 2),
pd.NaT,
None,
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, str, "date")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=["2020-01-01", "2020-01-02", None, None],
),
),
check_order=False,
)
def test_cast_time(self):
# happy path
pdf = pd.DataFrame(
dict(
a=["2020-01-01", "2020-01-02", "2020-01-03"],
b=[
"2020-01-01 01:00:00",
"2020-01-02 14:00:00",
"2020-01-03 15:00:00",
],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, date)
df["i"] = self.utils.cast(df.a, datetime)
df["j"] = self.utils.cast(df.b, "date")
df["k"] = self.utils.cast(df.b, datetime)
assert_pdf_eq(
self.to_pd(df[list("hijk")]),
pd.DataFrame(
dict(
h=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
i=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
j=[
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
],
k=[
datetime(2020, 1, 1, 1),
datetime(2020, 1, 2, 14),
datetime(2020, 1, 3, 15),
],
),
),
check_order=False,
)
# str -> date with None
pdf = pd.DataFrame(
dict(
a=["2020-01-01 01:00:00", "2020-01-02 00:00:00", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "date")
# assert_pdf_eq(
# self.to_pd(df[list("h")]),
# pd.DataFrame(
# dict(
# h=[datetime(2020, 1, 1), datetime(2020, 1, 2), None],
# ),
# ),
# check_order=False,
# )
# str -> datetime with None
pdf = pd.DataFrame(
dict(
a=["2020-01-01 11:00:00", "2020-01-02 12:00:00", None],
)
)
df = self.to_df(pdf)
df["h"] = self.utils.cast(df.a, "datetime")
assert_pdf_eq(
self.to_pd(df[list("h")]),
pd.DataFrame(
dict(
h=[datetime(2020, 1, 1, 11), datetime(2020, 1, 2, 12), None],
),
),
check_order=False,
)
def test_cast_df(self):
a = pd.DataFrame(dict(a=[1, 2, None], b=[True, None, False]))
df = self.utils.cast_df(
self.to_df(a.convert_dtypes()), Schema("a:int,b:bool").pa_schema
)
assert pd.Int32Dtype() == df["a"].dtype
assert pd.BooleanDtype() == df["b"].dtype
df = self.utils.cast_df(self.to_df(a), Schema("a:str,b:str").pa_schema)
assert pd.StringDtype() == df["a"].dtype
assert pd.StringDtype() == df["b"].dtype
# with input hint
a = pd.DataFrame(dict(a=[1, 2, None], b=[None, None, None]))
df = self.utils.cast_df(
self.to_df(a),
Schema("a:double,b:int").pa_schema,
Schema("a:int,b:double").pa_schema,
)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
# empty
a = pd.DataFrame(dict(a=[], b=[]))
df = self.utils.cast_df(self.to_df(a), Schema("a:double,b:int").pa_schema)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
# empty + input hint
a = pd.DataFrame(dict(a=[], b=[]))
df = self.utils.cast_df(
self.to_df(a),
Schema("a:double,b:int").pa_schema,
Schema("a:int,b:double").pa_schema,
)
assert pd.api.types.is_float_dtype(df["a"].dtype)
assert pd.api.types.is_integer_dtype(df["b"].dtype)
def test_cols_to_df(self):
df = self.to_df([["a", 1]], "a:str,b:long")
res = self.utils.cols_to_df([df["b"], df["a"]])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[1, "a"]], "b:long,a:str"))
)
res = self.utils.cols_to_df([df["b"], df["a"]], ["x", "y"])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[1, "a"]], "x:long,y:str"))
)
res = self.utils.cols_to_df([123, df["a"]], names=["x", "y"])
assert_pdf_eq(
self.to_pd(res), self.to_pd(self.to_df([[123, "a"]], "x:long,y:str"))
)
with raises(SlideInvalidOperation):
res = self.utils.cols_to_df([123, 456], names=["x", "y"])
def test_to_schema(self):
df = self.to_df([[1.0, 2], [2.0, 3]])
raises(ValueError, lambda: self.utils.to_schema(df))
df = self.to_df([[1.0, 2], [2.1, 3]], columns=["x", "y"])
assert Schema("x:double,y:long") == Schema(self.utils.to_schema(df))
df = self.to_df([["a", 2], ["b", 3]], columns=["x", "y"])
assert Schema("x:str,y:long") == Schema(self.utils.to_schema(df))
df = self.to_df([], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("object")})
assert [pa.field("x", pa.int32()), pa.field("y", pa.string())] == list(
self.utils.to_schema(df)
)
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("object")})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype(str)})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
df = self.to_df([[1, "x"], [2, "y"]], columns=["x", "y"])
df = df.astype(dtype={"x": np.int32, "y": np.dtype("str")})
assert Schema("x:int32,y:str") == Schema(self.utils.to_schema(df))
# timestamp test
df = self.to_df(
[[datetime(2020, 1, 1, 2, 3, 4, 5), datetime(2020, 2, 2)]],
columns=["a", "b"],
)
assert Schema("a:datetime,b:datetime") == Schema(self.utils.to_schema(df))
def test_index_compatible(self):
df = self.to_df([[3.0, 2], [2.1, 3]], columns=["x", "y"])
df = df.nlargest(100, ["x"])
self.utils.ensure_compatible(df)
df["p"] = "p"
df = df.set_index(["p"])
df.index.name = None
raises(
SlideIndexIncompatibleError, lambda: self.utils.ensure_compatible(df)
)
df = df.reset_index(drop=True)
self.utils.ensure_compatible(df)
def test_as_array_iterable(self):
schema = Schema("a:str,b:int").pa_schema
df = self.to_df([], "a:str,b:int")
assert [] == self.utils.as_array(df, schema)
assert [] == self.utils.as_array(df, schema, type_safe=True)
df = self.to_df([["a", 1]], "a:str,b:int")
assert [["a", 1]] == self.utils.as_array(df, schema)
assert [["a", 1]] == self.utils.as_array(df, schema, columns=["a", "b"])
assert [[1, "a"]] == self.utils.as_array(df, schema, columns=["b", "a"])
# prevent pandas auto type casting
schema = Schema("a:double,b:int").pa_schema
df = self.to_df([[1.0, 1.0]], "a:double,b:int")
data = self.utils.as_array(df, schema)
assert [[1.0, 1]] == data
assert isinstance(data[0][0], float)
assert isinstance(data[0][1], int)
assert [[1.0, 1]] == self.utils.as_array(df, schema, columns=["a", "b"])
assert [[1, 1.0]] == self.utils.as_array(df, schema, columns=["b", "a"])
df = self.to_df([[np.float64(1.0), 1.0]], "a:double,b:int")
assert [[1.0, 1]] == self.utils.as_array(df, schema)
assert isinstance(self.utils.as_array(df, schema)[0][0], float)
assert isinstance(self.utils.as_array(df, schema)[0][1], int)
schema = Schema("a:datetime,b:int").pa_schema
df = self.to_df(
            [[pd.Timestamp("2020-01-01")
import pandas as pd
import numpy as np
import pickle
import os
import webbrowser
import io
import requests
import pyLDAvis
import pyLDAvis.sklearn
import plotly.express as px
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
# local module imports
from cooksmart.exception_errors import DataFormatError, QueryError
from cooksmart.helpers import is_valid_recipe_df
from cooksmart.constants import urls
class RecipeRecommender:
def __init__(self, filepath=None, max_df=0.6, min_df=2):
"""
Creates an instance of Recipe Recommender
Initialises : filepath, tfidf_vect, data, recipe_ingredient_matrix,
title_tfidf
Reads in formatted csv file and vectorize recipes
Input:
filepath (str)
max_df (float): max document freq accepted by tfidf vectorizer
min_df (float): min document freq accepted by tfidf vectorizer
Output:
None
"""
self.filepath = filepath
if self.filepath is None:
self.data = pd.read_csv(urls["DATA_URL"], error_bad_lines=False)
rec_file = io.BytesIO(requests.get(urls["REC_TOP_URL"]).content)
self.recipe_ingredient_matrix = pickle.load(rec_file)
vec_file = io.BytesIO(requests.get(urls["TFIDF_URL"]).content)
self.tfidf_vect = pickle.load(vec_file)
self.title_tfidf = self.tfidf_vect.transform(
self.data['recipe_name'])
else:
if not os.path.exists(filepath):
raise FileNotFoundError(
f"{filepath} is not a valid path to a dataset")
            self.data = pd.read_csv(filepath, error_bad_lines=False)
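# Illustrative usage sketch (not from the package docs; the local CSV path below is a
# made-up example). Only the constructor shown above is exercised here:
#   recommender = RecipeRecommender()                  # downloads the hosted dataset and pickled models
#   recommender = RecipeRecommender("my_recipes.csv")  # or load a locally formatted recipe CSV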
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import warnings
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.impute import SimpleImputer as skImputer
from ..utils.stat_utils import which_columns_are_binary
from causallib.estimation import Matching
# TODO: Entire module might be redundant, now that scikit-learn supports missing values
# in its preprocessing: https://scikit-learn.org/stable/whats_new/v0.20.html#highlights
# The only support now needed is:
# 1) Transforming from numpy-array to pandas DataFrame in a pipeline, before specifying a causal model.
# 2) Possible generic support for causallib's additional `a` parameter, along with `X` and `y`.
class StandardScaler(BaseEstimator, TransformerMixin):
"""
Standardize continuous features by removing the mean and scaling to unit variance while allowing nans.
X = (X - X.mean()) / X.std()
"""
def __init__(self, with_mean=True, with_std=True, ignore_nans=True):
"""
Args:
with_mean (bool): Whether to center the data before scaling.
with_std (bool): Whether to scale the data to unit variance.
ignore_nans (bool): Whether to ignore NaNs during calculation.
"""
self.with_mean = with_mean
self.with_std = with_std
self.ignore_nans = ignore_nans
def fit(self, X, y=None):
"""
Compute the mean and std to be used for later scaling.
Args:
X (pd.DataFrame): The data used to compute the mean and standard deviation used for later scaling along the
features axis (axis=0).
y: Passthrough for ``Pipeline`` compatibility.
Returns:
StandardScaler: A fitted standard-scaler
"""
continuous_features = self._get_relevant_features(X)
self._feature_mask_ = continuous_features
if self.with_mean:
means = X.loc[:, self._feature_mask_].mean(skipna=self.ignore_nans)
else:
means = pd.Series(0, index=continuous_features)
self.mean_ = means
if self.with_std:
scales = X.loc[:, self._feature_mask_].std(skipna=self.ignore_nans)
else:
scales = pd.Series(1, index=continuous_features)
self.scale_ = scales
return self
def transform(self, X, y='deprecated'):
"""
Perform standardization by centering and scaling
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
y: Passthrough for ``Pipeline`` compatibility.X:
Returns:
pd.DataFrame: Scaled dataset.
"""
# Taken from the sklearn implementation. Will probably need adjustment when a new scikit-learn version is out:
if not isinstance(y, str) or y != 'deprecated':
warnings.warn("The parameter y on transform() is deprecated since 0.19 and will be removed in 0.21",
DeprecationWarning)
X = X.copy() # type: pd.DataFrame
if self.with_mean:
X.loc[:, self._feature_mask_] -= self.mean_
if self.with_std:
X.loc[:, self._feature_mask_] /= self.scale_
return X
def inverse_transform(self, X):
"""
Scale back the data to the original representation
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
Returns:
pd.DataFrame: Un-scaled dataset.
"""
X = X.copy() # type: pd.DataFrame
if self.with_std:
X.loc[:, self._feature_mask_] *= self.scale_
if self.with_mean:
X.loc[:, self._feature_mask_] += self.mean_
return X
@staticmethod
def _get_relevant_features(X):
"""
Returns a binary mask specifying the continuous features to operate on.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
Returns:
pd.Index: a pd.Index with name of columns specifying which features to apply the transformation on.
"""
# FIXME utilize sklearn.utils.multiclass.type_of_target()
continuous_cols = X.columns[~which_columns_are_binary(X)]
return continuous_cols
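# Minimal usage sketch for StandardScaler (illustrative; the toy DataFrame is made up):
#   X = pd.DataFrame({"age": [20.0, 35.0, np.nan, 50.0], "treated": [0, 1, 1, 0]})
#   scaler = StandardScaler().fit(X)      # the binary "treated" column is left untouched
#   X_scaled = scaler.transform(X)        # NaNs are skipped when computing mean/std and preserved
#   X_back = scaler.inverse_transform(X_scaled)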
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""
Scales features to 0-1, allowing for NaNs.
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
"""
def __init__(self, only_binary_features=True, ignore_nans=True):
"""
Args:
only_binary_features (bool): Whether to apply only on binary features or across all.
ignore_nans (bool): Whether to ignore NaNs during calculation.
"""
self.only_binary_features = only_binary_features
self.ignore_nans = ignore_nans
def fit(self, X, y=None):
"""
Compute the minimum and maximum to be used for later scaling.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
y: Passthrough for ``Pipeline`` compatibility.
Returns:
MinMaxScaler: a fitted MinMaxScaler
"""
feature_mask = self._get_relevant_features(X)
self._feature_mask_ = feature_mask
self.min_ = X.min(skipna=self.ignore_nans)[feature_mask]
self.max_ = X.max(skipna=self.ignore_nans)[feature_mask]
self.scale_ = self.max_ - self.min_
# if feature_mask.size != X.shape[1]:
# self.scale_[~feature_mask] = 1
# self.min_[~feature_mask] = 0
# self.max_[~feature_mask] = 1
return self
def inverse_transform(self, X):
"""
        Undo the 0-1 scaling and return the data in its original representation.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] Input data that will be transformed.
Returns:
pd.DataFrame: array-like, shape [n_samples, n_features]. Transformed data.
"""
# No warning for y, since there's no y variable.
        # This corresponds to the function signature in scikit-learn's code base
X = X.copy() # type: pd.DataFrame
X.loc[:, self._feature_mask_] *= self.scale_
X.loc[:, self._feature_mask_] += self.min_
return X
def transform(self, X):
"""
        Scale the chosen features of X to the range 0 - 1.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] Input data that will be transformed.
Returns:
pd.DataFrame: array-like, shape [n_samples, n_features]. Transformed data.
"""
X = X.copy() # type: pd.DataFrame
X.loc[:, self._feature_mask_] -= self.min_
X.loc[:, self._feature_mask_] /= self.scale_
return X
def _get_relevant_features(self, X):
"""
Returns a binary mask specifying the features to operate on (either all features or binary features if
self.only_binary_features is True.
Args:
X (pd.DataFrame): array-like, shape [n_samples, n_features] The data used to compute the mean and standard
deviation used for later scaling along the features axis (axis=0).
Returns:
pd.Index: a binary mask specifying which features to apply the transformation on.
"""
if self.only_binary_features:
feature_mask = which_columns_are_binary(X)
else:
feature_mask = np.ones(X.shape[1], dtype=bool)
return feature_mask
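# Usage sketch (illustrative): by default only binary features are rescaled, so pass
# only_binary_features=False to scale continuous columns as well:
#   mm = MinMaxScaler(only_binary_features=False).fit(X)
#   X01 = mm.transform(X)                 # each selected column mapped to the 0-1 range
#   X_orig = mm.inverse_transform(X01)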
class Imputer(skImputer):
def transform(self, X):
X_transformed = super().transform(X.values)
X_transformed = pd.DataFrame(
X_transformed, index=X.index, columns=X.columns)
return X_transformed
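# Usage sketch (illustrative): unlike sklearn's SimpleImputer, this thin wrapper returns
# a pandas DataFrame with the original index and column names preserved:
#   imp = Imputer(strategy="mean").fit(X)
#   X_imputed = imp.transform(X)          # still a DataFrame, not a bare numpy array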
class PropensityTransformer(BaseEstimator, TransformerMixin):
def __init__(self, learner, include_covariates=False):
"""Transform covariates by adding/replacing with the propensity score.
Args:
learner (sklearn.estimator) : A learner implementing `fit` and
`predict_proba` to use for predicting the propensity score.
include_covariates (bool) : Whether to return the original
covariates alongside the "propensity" column.
"""
self.include_covariates = include_covariates
self.learner = learner
def fit(self, X, a):
self.learner.fit(X, a)
return self
def transform(self, X, treatment_values=None):
"""Append propensity or replace covariates with propensity.
Args:
X (pd.DataFrame): A DataFrame of samples to transform. This will be
input to the learner trained by fit. If the columns are
different, the results will not be valid.
treatment_values (Any | None): A desired value/s to extract
propensity to (i.e. probabilities to what treatment value
should be calculated). If not specified, then the maximal
treatment value is chosen. This is since the usual case is of
treatment (A=1) control (A=0) setting.
Returns:
pd.DataFrame : DataFrame with a "propensity" column.
If "include_covariates" is `True`, it will include all of the
original features plus "propensity", else it will only have the
"propensity" column.
"""
treatment_values = 1 if treatment_values is None else treatment_values
res = self.learner.predict_proba(X)[:, treatment_values]
        res = pd.DataFrame(res, index=X.index, columns=["propensity"])
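# Usage sketch (illustrative; LogisticRegression is just one possible learner):
#   from sklearn.linear_model import LogisticRegression
#   pt = PropensityTransformer(learner=LogisticRegression(), include_covariates=True)
#   X_ps = pt.fit(X, a).transform(X)      # yields a "propensity" column for treatment a == 1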
import os
import glob
import pandas as pd
import numpy as np
import random
from sklearn.utils.validation import check_is_fitted
from sklearn.utils import check_array, as_float_array
from sklearn.base import TransformerMixin, BaseEstimator
import kneed
import scipy
import seaborn as sns
import matplotlib.pyplot as plt
def load_data(exp, plate, filetype):
"""load all data from a single experiment into a single dataframe"""
path = os.path.join('profiles',
f'{exp}',
f'{plate}',
f'*_{filetype}')
files = glob.glob(path)
print(files)
df = pd.concat(pd.read_csv(_, low_memory=False) for _ in files)
return df
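# Example call (the experiment, plate and file names below are placeholders, not real data):
#   df_plate = load_data("2021_Batch1", "BR00117010", "normalized_negcon.csv.gz")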
def get_metacols(df):
"""return a list of metadata columns"""
return [c for c in df.columns if c.startswith("Metadata_")]
def get_featurecols(df):
"""returna list of featuredata columns"""
return [c for c in df.columns if not c.startswith("Metadata")]
def get_metadata(df):
"""return dataframe of just metadata columns"""
return df[get_metacols(df)]
def get_featuredata(df):
"""return dataframe of just featuredata columns"""
return df[get_featurecols(df)]
def remove_negcon_empty_wells(df):
"""return dataframe of non-negative control wells"""
df = (
df.query('Metadata_control_type!="negcon"')
.dropna(subset=['Metadata_broad_sample'])
.reset_index(drop=True)
)
return df
def select_only_controls(df):
""" return dataframe of only controls, without outer wells"""
#df = (
# df.query('Metadata_Well!="A*"' and 'Metadata_Well!="P*"' and 'Metadata_Well!="*01"' and 'Metadata_Well!="*24"'
# and 'Metadata_control_type!="poscon_orf"' and 'Metadata_pert_type=="control"')
# )
df = (
df.query('Metadata_pert_type=="control"')
)
return df
def concat_profiles(df1, df2):
"""Concatenate dataframes"""
if df1.shape[0] == 0:
df1 = df2.copy()
else:
frames = [df1, df2]
df1 = pd.concat(frames, ignore_index=True, join="inner")
return df1
def percent_score(null_dist, corr_dist, how):
"""
Calculates the Percent strong or percent recall scores
:param null_dist: Null distribution
:param corr_dist: Correlation distribution
:param how: "left", "right" or "both" for using the 5th percentile, 95th percentile or both thresholds
:return: proportion of correlation distribution beyond the threshold
"""
if how == 'right':
perc_95 = np.nanpercentile(null_dist, 95)
above_threshold = corr_dist > perc_95
return np.mean(above_threshold.astype(float))*100, perc_95
if how == 'left':
perc_5 = np.nanpercentile(null_dist, 5)
below_threshold = corr_dist < perc_5
return np.mean(below_threshold.astype(float))*100, perc_5
if how == 'both':
perc_95 = np.nanpercentile(null_dist, 95)
above_threshold = corr_dist > perc_95
perc_5 = np.nanpercentile(null_dist, 5)
below_threshold = corr_dist < perc_5
return (np.mean(above_threshold.astype(float)) + np.mean(below_threshold.astype(float)))*100, perc_95, perc_5
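# Quick illustrative example with synthetic distributions (not real profile data):
#   null_dist = np.random.normal(0, 0.1, 10000)   # correlations between random "replicates"
#   corr_dist = np.random.normal(0.4, 0.2, 500)   # correlations between true replicates
#   pct_strong, threshold_95 = percent_score(null_dist, corr_dist, how='right')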
def corr_between_replicates(df, group_by_feature):
"""
Correlation between replicates
Parameters:
-----------
df: pd.DataFrame
group_by_feature: Feature name to group the data frame by
Returns:
--------
list-like of correlation values
"""
replicate_corr = []
replicate_grouped = df.groupby(group_by_feature)
for name, group in replicate_grouped:
group_features = get_featuredata(group)
corr = np.corrcoef(group_features)
if len(group_features) == 1: # If there is only one replicate on a plate
replicate_corr.append(np.nan)
else:
np.fill_diagonal(corr, np.nan)
replicate_corr.append(np.nanmedian(corr)) # median replicate correlation
return replicate_corr
def corr_between_non_replicates(df, n_samples, n_replicates, metadata_compound_name):
"""
Null distribution between random "replicates".
Parameters:
------------
df: pandas.DataFrame
n_samples: int
n_replicates: int
metadata_compound_name: Compound name feature
Returns:
--------
list-like of correlation values, with a length of `n_samples`
"""
df.reset_index(drop=True, inplace=True)
null_corr = []
while len(null_corr) < n_samples:
compounds = random.choices([_ for _ in range(len(df))], k=n_replicates)
sample = df.loc[compounds].copy()
if len(sample[metadata_compound_name].unique()) == n_replicates:
sample_features = get_featuredata(sample)
corr = np.corrcoef(sample_features)
np.fill_diagonal(corr, np.nan)
null_corr.append(np.nanmedian(corr)) # median replicate correlation
return null_corr
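# Putting the two distributions together gives a "percent replicating" style score
# (sketch; `df_profiles` and the metadata column are assumed, not defined in this module):
#   rep_corr = corr_between_replicates(df_profiles, 'Metadata_broad_sample')
#   null_corr = corr_between_non_replicates(df_profiles, n_samples=1000, n_replicates=4,
#                                           metadata_compound_name='Metadata_broad_sample')
#   percent_replicating, cutoff = percent_score(null_corr, rep_corr, how='right')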
def correlation_between_modalities(modality_1_df, modality_2_df, modality_1, modality_2, metadata_common, metadata_perturbation):
"""
Compute the correlation between two different modalities.
:param modality_1_df: Profiles of the first modality
:param modality_2_df: Profiles of the second modality
    :param modality_1: Name of the first modality
    :param modality_2: Name of the second modality
    :param metadata_common: Metadata feature, common to both modalities, that identifies perturbation pairs
    :param metadata_perturbation: Perturbation name feature
:return: list-like of correlation values
"""
list_common_perturbation_groups = list(np.intersect1d(list(modality_1_df[metadata_common]), list(modality_2_df[metadata_common])))
    merged_df = pd.concat([modality_1_df, modality_2_df], ignore_index=False, join='inner')
#py_pandas_plot.py
import pandas as pd
ser = pd.Series([2,8,3,6,1])
'''
Created on April 15, 2012
Last update on July 18, 2015
@author: <NAME>
@author: <NAME>
@author: <NAME>
'''
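# NOTE: this module targets pandas < 0.18. pd.rolling_mean / pd.rolling_std /
# pd.rolling_sum / pd.ewma and DataFrame.get_value were removed in later pandas
# releases; on modern pandas the rough equivalents are:
#   pd.rolling_mean(s, n)                  ->  s.rolling(n).mean()
#   pd.rolling_std(s, n)                   ->  s.rolling(n).std()
#   pd.rolling_sum(s, n)                   ->  s.rolling(n).sum()
#   pd.ewma(s, span=n, min_periods=n - 1)  ->  s.ewm(span=n, min_periods=n - 1).mean()
#   df.get_value(i, 'High')                ->  df['High'].iat[i]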
import pandas as pd
class Columns(object):
OPEN='Open'
HIGH='High'
LOW='Low'
CLOSE='Close'
VOLUME='Volume'
# def get(df, col):
# return(df[col])
# df['Close'] => get(df, COL.CLOSE)
# price=COL.CLOSE
indicators=["MA", "EMA", "MOM", "ROC", "ATR", "BBANDS", "PPSR", "STOK", "STO",
"TRIX", "ADX", "MACD", "MassI", "Vortex", "KST", "RSI", "TSI", "ACCDIST",
"Chaikin", "MFI", "OBV", "FORCE", "EOM", "CCI", "COPP", "KELCH", "ULTOSC",
"DONCH", "STDDEV"]
class Settings(object):
join=True
col=Columns()
SETTINGS=Settings()
def out(settings, df, result):
if not settings.join:
return result
else:
df=df.join(result)
return df
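# How SETTINGS.join changes the return value (illustrative):
#   SETTINGS.join = True   ->  MA(df, 10) returns df with an extra 'MA_10' column
#   SETTINGS.join = False  ->  MA(df, 10) returns only the 'MA_10' Series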
def MA(df, n, price='Close'):
"""
Moving Average
"""
name='MA_{n}'.format(n=n)
result = pd.Series(pd.rolling_mean(df[price], n), name=name)
return out(SETTINGS, df, result)
def EMA(df, n, price='Close'):
"""
Exponential Moving Average
"""
result=pd.Series(pd.ewma(df[price], span=n, min_periods=n - 1), name='EMA_' + str(n))
return out(SETTINGS, df, result)
def MOM(df, n, price='Close'):
"""
Momentum
"""
result=pd.Series(df[price].diff(n), name='Momentum_' + str(n))
return out(SETTINGS, df, result)
def ROC(df, n, price='Close'):
"""
Rate of Change
"""
M = df[price].diff(n - 1)
N = df[price].shift(n - 1)
result = pd.Series(M / N, name='ROC_' + str(n))
return out(SETTINGS, df, result)
def ATR(df, n):
"""
Average True Range
"""
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
# for i, idx in enumerate(df.index)
# TR=max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
        TR = max(df['High'].iloc[i + 1], df['Close'].iloc[i]) - min(df['Low'].iloc[i + 1], df['Close'].iloc[i])
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
result = pd.Series(pd.ewma(TR_s, span=n, min_periods=n), name='ATR_' + str(n))
return out(SETTINGS, df, result)
def BBANDS(df, n, price='Close'):
"""
Bollinger Bands
"""
MA = pd.Series(pd.rolling_mean(df[price], n))
MSD = pd.Series(pd.rolling_std(df[price], n))
b1 = 4 * MSD / MA
B1 = pd.Series(b1, name='BollingerB_' + str(n))
b2 = (df[price] - MA + 2 * MSD) / (4 * MSD)
B2 = pd.Series(b2, name='Bollinger%b_' + str(n))
result = pd.DataFrame([B1, B2]).transpose()
return out(SETTINGS, df, result)
def PPSR(df):
"""
Pivot Points, Supports and Resistances
"""
PP = pd.Series((df['High'] + df['Low'] + df['Close']) / 3)
R1 = pd.Series(2 * PP - df['Low'])
S1 = pd.Series(2 * PP - df['High'])
R2 = pd.Series(PP + df['High'] - df['Low'])
S2 = pd.Series(PP - df['High'] + df['Low'])
R3 = pd.Series(df['High'] + 2 * (PP - df['Low']))
S3 = pd.Series(df['Low'] - 2 * (df['High'] - PP))
result = pd.DataFrame([PP, R1, S1, R2, S2, R3, S3]).transpose()
return out(SETTINGS, df, result)
def STOK(df):
"""
Stochastic oscillator %K
"""
result = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
return out(SETTINGS, df, result)
def STO(df, n):
"""
Stochastic oscillator %D
"""
SOk = pd.Series((df['Close'] - df['Low']) / (df['High'] - df['Low']), name='SO%k')
result = pd.Series(pd.ewma(SOk, span=n, min_periods=n - 1), name='SO%d_' + str(n))
return out(SETTINGS, df, result)
def SMA(df, timeperiod, key='Close'):
result = pd.Series(pd.rolling_mean(df[key], timeperiod, min_periods=timeperiod), name='SMA_' + str(timeperiod))
return out(SETTINGS, df, result)
def TRIX(df, n):
"""
Trix
"""
EX1 = pd.ewma(df['Close'], span=n, min_periods=n - 1)
EX2 = pd.ewma(EX1, span=n, min_periods=n - 1)
EX3 = pd.ewma(EX2, span=n, min_periods=n - 1)
i = 0
ROC_l = [0]
while i + 1 <= len(df) - 1: # df.index[-1]:
ROC = (EX3[i + 1] - EX3[i]) / EX3[i]
ROC_l.append(ROC)
i = i + 1
result = pd.Series(ROC_l, name='Trix_' + str(n))
return out(SETTINGS, df, result)
def ADX(df, n, n_ADX):
"""
Average Directional Movement Index
"""
i = 0
UpI = []
DoI = []
while i + 1 <= len(df) - 1: # df.index[-1]:
UpMove = df.get_value(i + 1, 'High') - df.get_value(i, 'High')
DoMove = df.get_value(i, 'Low') - df.get_value(i + 1, 'Low')
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
i = 0
TR_l = [0]
while i < len(df) - 1: # df.index[-1]:
TR = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR_l.append(TR)
i = i + 1
TR_s = pd.Series(TR_l)
ATR = pd.Series(pd.ewma(TR_s, span=n, min_periods=n))
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1) / ATR,name='PosDI')
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1) / ATR,name='NegDI')
result = pd.Series(pd.ewma(abs(PosDI - NegDI) / (PosDI + NegDI), span=n_ADX, min_periods=n_ADX - 1), name='ADX_' + str(n) + '_' + str(n_ADX))
result = pd.concat([df,PosDI,NegDI,result], join='outer', axis=1,ignore_index=True)
result.columns=["High","Low","Close","PosDI","NegDI","ADX"]
return result
def MACD(df, n_fast, n_slow, price='Close'):
"""
MACD, MACD Signal and MACD difference
"""
EMAfast = pd.Series(pd.ewma(df[price], span=n_fast, min_periods=n_slow - 1))
EMAslow = pd.Series(pd.ewma(df[price], span=n_slow, min_periods=n_slow - 1))
MACD = pd.Series(EMAfast - EMAslow, name='MACD_%d_%d' % (n_fast, n_slow))
MACDsign = pd.Series(pd.ewma(MACD, span=9, min_periods=8), name='MACDsign_%d_%d' % (n_fast, n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_%d_%d' % (n_fast, n_slow))
result = pd.DataFrame([MACD, MACDsign, MACDdiff]).transpose()
return out(SETTINGS, df, result)
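# Example (illustrative): a standard 12/26 MACD on a DataFrame with a 'Close' column.
# With SETTINGS.join = True this appends 'MACD_12_26', 'MACDsign_12_26' and
# 'MACDdiff_12_26' columns to df:
#   df = MACD(df, n_fast=12, n_slow=26)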
def MassI(df):
"""
Mass Index
"""
Range = df['High'] - df['Low']
EX1 = pd.ewma(Range, span=9, min_periods=8)
EX2 = pd.ewma(EX1, span=9, min_periods=8)
Mass = EX1 / EX2
result = pd.Series(pd.rolling_sum(Mass, 25), name='Mass Index')
return out(SETTINGS, df, result)
def Vortex(df, n):
"""
Vortex Indicator
"""
i = 0
TR = [0]
while i < len(df) - 1: # df.index[-1]:
Range = max(df.get_value(i + 1, 'High'), df.get_value(i, 'Close')) - min(df.get_value(i + 1, 'Low'), df.get_value(i, 'Close'))
TR.append(Range)
i = i + 1
i = 0
VM = [0]
while i < len(df) - 1: # df.index[-1]:
Range = abs(df.get_value(i + 1, 'High') - df.get_value(i, 'Low')) - abs(df.get_value(i + 1, 'Low') - df.get_value(i, 'High'))
VM.append(Range)
i = i + 1
result = pd.Series(pd.rolling_sum(pd.Series(VM), n) / pd.rolling_sum(pd.Series(TR), n), name='Vortex_' + str(n))
return out(SETTINGS, df, result)
def KST(df, r1, r2, r3, r4, n1, n2, n3, n4):
"""
KST Oscillator
"""
M = df['Close'].diff(r1 - 1)
N = df['Close'].shift(r1 - 1)
ROC1 = M / N
M = df['Close'].diff(r2 - 1)
N = df['Close'].shift(r2 - 1)
ROC2 = M / N
M = df['Close'].diff(r3 - 1)
N = df['Close'].shift(r3 - 1)
ROC3 = M / N
M = df['Close'].diff(r4 - 1)
N = df['Close'].shift(r4 - 1)
ROC4 = M / N
result = pd.Series(pd.rolling_sum(ROC1, n1) + pd.rolling_sum(ROC2, n2) * 2 + pd.rolling_sum(ROC3, n3) * 3 + pd.rolling_sum(ROC4, n4) * 4, name='KST_' + str(r1) + '_' + str(r2) + '_' + str(r3) + '_' + str(r4) + '_' + str(n1) + '_' + str(n2) + '_' + str(n3) + '_' + str(n4))
return out(SETTINGS, df, result)
def RSI(df, n):
"""
Relative Strength Index
"""
i = 0
UpI = [0]
DoI = [0]
while i + 1 <= len(df) - 1: # df.index[-1]
UpMove = df.iloc[i + 1]['High'] - df.iloc[i]['High']
DoMove = df.iloc[i]['Low'] - df.iloc[i + 1]['Low']
if UpMove > DoMove and UpMove > 0:
UpD = UpMove
else:
UpD = 0
UpI.append(UpD)
if DoMove > UpMove and DoMove > 0:
DoD = DoMove
else:
DoD = 0
DoI.append(DoD)
i = i + 1
UpI = pd.Series(UpI)
DoI = pd.Series(DoI)
PosDI = pd.Series(pd.ewma(UpI, span=n, min_periods=n - 1))
NegDI = pd.Series(pd.ewma(DoI, span=n, min_periods=n - 1))
result = pd.Series(PosDI / (PosDI + NegDI), name='RSI_' + str(n))
return out(SETTINGS, df, result)
def TSI(df, r, s):
"""
True Strength Index
"""
M = pd.Series(df['Close'].diff(1))
aM = abs(M)
EMA1 = pd.Series(pd.ewma(M, span=r, min_periods=r - 1))
aEMA1 = pd.Series(pd.ewma(aM, span=r, min_periods=r - 1))
EMA2 = pd.Series(pd.ewma(EMA1, span=s, min_periods=s - 1))
    aEMA2 = pd.Series(pd.ewma(aEMA1, span=s, min_periods=s - 1))
#!/usr/bin/env python
# coding: utf-8
# # Romantic Relationship Data Analysis (Facebook)
# #### Author: Md. <NAME>
# As a Data Science enthusiast, I have always wanted an interesting project that analyzes my personal life. I've been in a serious long-term commitment since late July 2020, so I recently decided to collect the relevant dataset from Facebook's 'Request Information' centre, explore and clean the data, and then analyze whatever I can derive from it. Now let's get started.
# ## Importing Libraries
# First we will import the necessary Python libraries. If any of the following libraries are missing, install them using `!pip install (library package name)`.
# For example: `!pip install chart_studio`, in case you don't have chart-studio install on your machine.
#
# In[1]:
import json
import numpy as np
import pandas as pd
import glob
import seaborn as sns
import plotly
import chart_studio.plotly as py
import plotly.graph_objs as go
from plotly import tools
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from nltk.corpus import stopwords
from datetime import date
import chart_studio
# ## Plotly Credentials
# Sign up for a new Chart Studio account with a valid username; after email verification they will provide you with a unique API key. Then, using the following code, insert your own username and API key to enable the interactive Plotly features and tools.
# Go to the following link to create a new account or sign in to your Chart Studio account:
# https://chart-studio.plotly.com/settings/api
# In[2]:
chart_studio.tools.set_credentials_file(username='Abrar2652', api_key='<KEY>E<KEY>')
# ## Data Cleaning
# Upon my request in Facebook's 'Request Information' section, the data items I chose to download were prepared for me. Within the next 48 hours, they sent me an email confirming that the data preparation was complete; the lead time depends on the size of the data you request. In the 'Download Your Information' section, I downloaded the `messages` archive in zip format. I only requested the messages from `July 2020 to February 2021` because my conversation with my life-partner started in July 2020.
#
# The zip file contains all your conversations, organized by the names of the people you conversed with, along with every audio, video and photo file you exchanged. The archive is quite large (3.05 GB), so make sure you don't request unnecessary data from Facebook and waste your valuable time. Just extract the necessary *JSON message files*.
#
# You may face difficulties merging all the JSON files into one. I suggest doing it manually using the following steps (a small programmatic merge sketch is also shown right after this list):
# 1. Keep the first JSON file intact, don't change it.
# 2. Copy the messages from other files and paste it in the desired place of the first file. If you know the Python Dictionary format, then it's easier for you to paste the messages in correct manner.
# 3. Then delete all the messages containing no text. I mean you should remove the links you shared. Just open notepad. find 'youtube' and remove that certain message.
# 4. The efficient way to clean the data is to run the following code and you'll see error at a certain line. In the notepad, find and go to that line. Just delete that message showing error because usually error encounters when the message contains link or NULL
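# A programmatic alternative to the manual merge (sketch only — the folder path is a
# placeholder, and it assumes every file follows the message_N.json inbox format with a
# top-level "messages" list):
#
#     import glob, json
#     merged = None
#     for fp in sorted(glob.glob("G:/exp/message_*.json")):
#         with open(fp, encoding="utf-8") as f:
#             part = json.load(f)
#         if merged is None:
#             merged = part
#         else:
#             merged["messages"].extend(part["messages"])
#     with open("G:/exp/merged_messages.json", "w", encoding="utf-8") as f:
#         json.dump(merged, f)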
# In[3]:
#Path to the JSON files
filepath = "G:\exp\message_1.json"
from copy import deepcopy
def cross_join(left, right):
new_rows = []
for left_row in left:
for right_row in right:
temp_row = deepcopy(left_row)
for key, value in right_row.items():
temp_row[key] = value
new_rows.append(deepcopy(temp_row))
return new_rows
def flatten_list(data):
for elem in data:
if isinstance(elem, list):
yield from flatten_list(elem)
else:
yield elem
def json_to_dataframe(data_in):
def flatten_json(data, prev_heading=''):
if isinstance(data, dict):
rows = [{}]
for key, value in data.items():
rows = cross_join(rows, flatten_json(value, prev_heading + '.' + key))
elif isinstance(data, list):
rows = []
for i in range(len(data)):
[rows.append(elem) for elem in flatten_list(flatten_json(data[i], prev_heading))]
else:
rows = [{prev_heading[1:]: data}]
return rows
return pd.DataFrame(flatten_json(data_in))
if __name__ == '__main__':
with open(filepath) as json_file:
json_data = json.load(json_file)
df = json_to_dataframe(json_data)
df.tail()
# Convert the timestamp provided in millisecond to the human recognizable date and time format
# In[4]:
df['date_time']=pd.to_datetime(df['messages.timestamp_ms'], unit='ms')
# Delete the unnecessary columns for our analysis
# In[5]:
df=df.drop(['messages.timestamp_ms','messages.reactions.reaction','messages.photos.creation_timestamp','messages.audio_files.uri','messages.audio_files.creation_timestamp','messages.videos.creation_timestamp','messages.type','messages.files.creation_timestamp'],axis=1)
df.date_time
# Let's count the total messages per day. Don't be surprised by any imbalance, since we have different messaging styles: I prefer short, line-by-line texts, while she prefers to put all her feelings into one single message. So no matter how many words make up a message, it is counted as a single message :)
# In[6]:
df_timeline = df.groupby([df['date_time'].dt.date, 'messages.sender_name'])['messages.content'].count().reset_index()
df_timeline
#
# We exchanged *481.40* messages per day on average: just sum up all the message counts and divide by the number of days (the timeline has 393 rows for 2 senders, i.e. about 196.5 days).
# In[36]:
df_timeline['messages.content'].sum()/(393/2)
# In[7]:
df['messages.sender_name']
# ### Overall count of messages:
# In[8]:
abrar_tot = df[df['messages.sender_name'] == '<NAME>'].count()
mithi_tot = df[df['messages.sender_name'] == '<NAME>'].count()
# In[9]:
abrar_tot
# Let's count the average number of messages sent by me and her individually.
# In[37]:
df[df['messages.sender_name'] == '<NAME>']['messages.content'].count()/(393/2)
# In[10]:
mithi_tot
# In[41]:
df[df['messages.sender_name'] == '<NAME>']['messages.content'].count()/(393/2)
# ### Standard Deviation of individual texting
# In[11]:
abrar_std = np.std(df_timeline[df_timeline['messages.sender_name']=='<NAME>']['messages.content'])
mithi_std = np.std(df_timeline[df_timeline['messages.sender_name']=='<NAME>']['messages.content'])
print(abrar_std)
print(mithi_std)
# ## Timeline of message sending by both:
# In[12]:
## timeline of message sending by both
traceA = go.Scatter(
x = list(df_timeline[df_timeline['messages.sender_name']=='<NAME>'].date_time),
y = list(df_timeline[df_timeline['messages.sender_name']=='<NAME>']['messages.content']),
mode = 'lines',
name = 'by Abrar',
marker = dict(
color = 'rgb(221,46,107)'
)
)
traceM = go.Scatter(
x = list(df_timeline[df_timeline['messages.sender_name']=='<NAME>'].date_time),
y = list(df_timeline[df_timeline['messages.sender_name']=='<NAME>']['messages.content']),
mode = 'lines',
name = 'by Mithi',
marker = dict(
color = 'rgb(0,102,153)'
)
)
data = [traceM, traceA]
# slider set up:
layout = dict(
title='All Messages Ever Sent, Timeline',
legend = dict(orientation="h"),
xaxis=dict(
rangeselector=dict(
buttons=list([
dict(count=1,
label='1m',
step='month',
stepmode='backward'),
dict(count=6,
label='6m',
step='month',
stepmode='backward'),
dict(count=1,
label='YTD',
step='year',
stepmode='todate'),
dict(count=1,
label='1y',
step='year',
stepmode='backward'),
dict(step='all')
])
),
rangeslider=dict(
visible = True
),
type='date'
)
)
# In[13]:
fig = dict(data=data, layout=layout)
py.iplot(fig, filename='analysis2-messaging-trends')
# ### Adding weekday column:
# In[14]:
df['weekday'] = df['date_time'].apply(lambda x: x.weekday()) # list of the days of the week
def label_daysofweek (row):
if row['weekday'] == 0 :
return 'Monday'
if row['weekday'] == 1 :
return 'Tuesday'
if row['weekday'] == 2 :
return 'Wednesday'
if row['weekday'] == 3 :
return 'Thursday'
if row['weekday'] == 4 :
return 'Friday'
if row['weekday'] == 5 :
return 'Saturday'
if row['weekday'] == 6 :
return 'Sunday'
return
# ## Weekday averages from each sender:
# In[15]:
df_weekly = df.groupby(['messages.sender_name',df['date_time'].dt.day,'weekday'])['messages.content'].count().reset_index()
df_weekly['weekday_w'] = df_weekly.apply(lambda row: label_daysofweek (row),axis=1)
df_weekly_avg = df_weekly.groupby(['weekday','messages.sender_name'])['messages.content'].mean().reset_index()
df_weekly = df_weekly.sort_values(by='weekday')
df_weekly = df_weekly[df_weekly['messages.content']<700]
# plotting:
x = df_weekly[df_weekly['messages.sender_name']=='<NAME>'].weekday_w.tolist()
y_abrar= df_weekly[df_weekly['messages.sender_name']=='<NAME>']['messages.content'].tolist()
y_mithi= df_weekly[df_weekly['messages.sender_name']=='<NAME>']['messages.content'].tolist()
trace_abrar = go.Box(
y=y_abrar,
x=df_weekly[df_weekly['messages.sender_name']=='<NAME>'].weekday_w.tolist(),
name='by Abrar',
marker = dict(
color = 'rgb(221,46,107)',
outliercolor = 'rgba(224, 35, 79, 0.6)',
line = dict(
outliercolor = 'rgba(224, 35, 79, 0.6)',
outlierwidth = 2)),
)
trace_mithi = go.Box(
y=y_mithi,
x=df_weekly[df_weekly['messages.sender_name']=='<NAME>'].weekday_w.tolist(),
name='by Mithi',
marker=dict(
color = 'rgb(0,102,153)',
outliercolor = 'rgba(0, 73, 153, 0.6)',
line = dict(
outliercolor = 'rgba(0, 73, 153, 0.6)',
outlierwidth = 2)
)
)
layout = go.Layout(
title='Weekly Messages Breakdown',
yaxis=dict(
zeroline=False,
title='Distinct Messages Sent'
),
boxmode='group'
)
data = [trace_mithi, trace_abrar]
fig2 = go.Figure(data=data, layout=layout)
py.iplot(fig2,filename='analysis2-weekday-msgs')
# In[16]:
df_weekly
# ## Time of day analysis:
# Let's group the texts on the basis of each hour. We can see that we both are super active at the 18th hour of the day and the lowest active at the 0th hour.
# In[44]:
tot_days = max(df.date_time.dt.date)-min(df.date_time.dt.date)
df_time_day = df.groupby([df['date_time'].dt.hour,'messages.sender_name'])['messages.content'].count().reset_index()
df_time_day['avg_mgs_hr'] = df_time_day['messages.content']/(393/2)
df_time_day
# In[52]:
df_time_day['avg_mgs_hr'].max()
# In[53]:
df_time_day['avg_mgs_hr'].min()
# ## Plot the histogram based on the total counts on it
# ### plot the daily average
# In[45]:
gt_daytime = np.array(df_time_day[df_time_day['messages.sender_name']=='<NAME>'].avg_mgs_hr)
oj_daytime = np.array(df_time_day[df_time_day['messages.sender_name']=='<NAME>'].avg_mgs_hr)
diff = oj_daytime-gt_daytime
trace1 = go.Bar(
x=df_time_day[df_time_day['messages.sender_name']=='<NAME>'].date_time,
y=df_time_day[df_time_day['messages.sender_name']=='<NAME>'].avg_mgs_hr,
name='by Abrar',
marker=dict(
color = 'rgb(221,46,107)'
)
)
trace2 = go.Bar(
x=df_time_day[df_time_day['messages.sender_name']=='<NAME>'].date_time,
y=df_time_day[df_time_day['messages.sender_name']=='<NAME>'].avg_mgs_hr,
name='by Mithi',
marker=dict(
color='rgb(0,102,153)',
)
)
trace3 = go.Bar(
x=df_time_day[df_time_day['messages.sender_name']=='<NAME>'].date_time,
y=diff,
name='difference',
marker=dict(
color='rgb(244,130,24)',
)
)
data = [trace1, trace2,trace3]
layout = go.Layout(
title='Average Hourly Messages',
xaxis = dict(title='Time of Day'),
yaxis = dict(title='Average No. of Messages'),
legend = dict(orientation="h"),
barmode='group'
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='analysis2-avg-per-hour')
# ## Looking at the differences in time
#
# #### Who's the initiator?
# #### Find the first messages of each day (after 7am)
# #### Find the first messages of each conversation (gaps of 2 hours between last and first)
# In[46]:
# removing anything before 7am:
df_7_am = df[df['date_time'].dt.hour > 6]
# finding the first messages of each day:
df_firsts = df_7_am.groupby(df['date_time'].dt.date).apply(lambda x: x.iloc[[0]])
# plotting:
# plot the count of first messages (separate curve pp), group by hour of day, show the timeline
df_firsts = df_firsts.rename(index=str, columns={"date_time": "time1"}).reset_index()
df_firsts['hour'] = df_firsts.time1.dt.hour
#df_firsts = df_firsts.groupby([df_firsts['time1'].dt.hour,'sender']).count()
#df_firsts = df_firsts.drop(columns=['date_time','time1']).reset_index()
trace1 = go.Histogram(
x=df_firsts[df_firsts['messages.sender_name']=='<NAME>'].hour,
name='by Abrar',
opacity=0.75,
xbins=dict(
start=7.0,
end=24.0,
size=1
),
marker=dict(
color='rgb(221,46,107)',
)
)
trace2 = go.Histogram(
x=df_firsts[df_firsts['messages.sender_name']=='<NAME>'].hour,
name='by Mithi',
opacity=0.75,
xbins=dict(
start=7.0,
end=24.0,
size=1
),
marker=dict(
color='rgb(0,102,153)',
)
)
data = [trace2, trace1]
layout = go.Layout(
barmode='overlay',
title='First Messages of the Day',
xaxis = dict(title='Time of Day'),
yaxis = dict(title = 'Distinct Messages'),
legend = dict(orientation="h"),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='analysis2-first-msg-dist')
# ### Plotting number of initiations for each month (sum on num-msg, in group by month)
# In[20]:
df_firsts.date_time = | pd.to_datetime(df_firsts['date_time']) | pandas.to_datetime |
__all__ = [
'PrettyPachydermClient'
]
import logging
import re
from typing import Dict, List, Iterable, Union, Optional
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pandas.io.formats.style as style
import pandas as pd
import numpy as np
import yaml
from IPython.core.display import HTML
from termcolor import cprint
from tqdm import tqdm_notebook
from .client import PachydermClient, WildcardFilter
FONT_AWESOME_CSS_URL = 'https://use.fontawesome.com/releases/v5.8.1/css/all.css'
CLIPBOARD_JS_URL = 'https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.js'
BAR_COLOR = '#105ecd33'
PROGRESS_BAR_COLOR = '#03820333'
# Make yaml.dump() keep the order of keys in dictionaries
yaml.add_representer(
dict,
lambda self,
data: yaml.representer.SafeRepresenter.represent_dict(self, data.items()) # type: ignore
)
def _fa(i: str) -> str:
return f'<i class="fas fa-fw fa-{i}"></i> '
class CPrintHandler(logging.StreamHandler):
def emit(self, record: logging.LogRecord):
color = {
logging.INFO: 'green',
logging.WARNING: 'yellow',
logging.ERROR: 'red',
logging.CRITICAL: 'red',
}.get(record.levelno, 'grey')
cprint(self.format(record), color=color)
class PrettyTable(HTML):
def __init__(self, styler: style.Styler, df: pd.DataFrame):
super().__init__(data=styler.render())
self.raw = df
self.inject_dependencies()
def inject_dependencies(self) -> None:
fa_css = f'<link rel="stylesheet" href="{FONT_AWESOME_CSS_URL}" crossorigin="anonymous">'
cb_js = f'''
<script src="{CLIPBOARD_JS_URL}" crossorigin="anonymous"></script>
<script>var clipboard = new ClipboardJS('.copyable');</script>
'''
self.data = fa_css + cb_js + self.data # type: ignore
class PrettyYAML(HTML):
def __init__(self, obj: object):
super().__init__(data=self.format_yaml(obj))
self.raw = obj
@staticmethod
def format_yaml(obj: object) -> str:
s = str(yaml.dump(obj))
s = re.sub(r'(^[\s-]*)([^\s]+:)', '\\1<span style="color: #888;">\\2</span>', s, flags=re.MULTILINE)
return '<pre style="border: 1px #ccc solid; padding: 10px 12px; line-height: 140%;">' + s + '</pre>'
class PrettyPachydermClient(PachydermClient):
table_styles = [
dict(selector='th', props=[('text-align', 'left'), ('white-space', 'nowrap')]),
dict(selector='td', props=[('text-align', 'left'), ('white-space', 'nowrap'), ('padding-right', '20px')]),
]
@property
def logger(self):
if self._logger is None:
self._logger = logging.getLogger('pachypy')
self._logger.handlers = [CPrintHandler()]
self._logger.setLevel(logging.DEBUG)
self._logger.propagate = False
return self._logger
def list_repos(self, repos: WildcardFilter = '*') -> PrettyTable:
df = super().list_repos(repos=repos)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'is_tick': 'Tick',
'branches': 'Branches',
'size_bytes': 'Size',
'created': 'Created',
}, axis=1, inplace=True)
df['Tick'] = df['Tick'].map({True: _fa('stopwatch'), False: ''})
df['Branches'] = df['Branches'].apply(', '.join)
styler = df[['Repo', 'Tick', 'Branches', 'Size', 'Created']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({'Created': self._format_datetime, 'Size': self._format_size}) \
.set_properties(subset=['Branches'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_commits(self, repos: WildcardFilter, n: int = 10) -> PrettyTable:
df = super().list_commits(repos=repos, n=n)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'commit': 'Commit',
'branches': 'Branch',
'size_bytes': 'Size',
'started': 'Started',
'finished': 'Finished',
'parent_commit': 'Parent Commit',
}, axis=1, inplace=True)
styler = df[['Repo', 'Commit', 'Branch', 'Size', 'Started', 'Finished', 'Parent Commit']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({
'Commit': self._format_hash,
'Parent Commit': self._format_hash,
'Branch': ', '.join,
'Started': self._format_datetime,
'Finished': self._format_datetime,
'Size': self._format_size
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_files(self, repos: WildcardFilter, branch: Optional[str] = 'master', commit: Optional[str] = None,
glob: str = '**', files_only: bool = True) -> PrettyTable:
df = super().list_files(repos=repos, branch=branch, commit=commit, glob=glob, files_only=files_only)
dfr = df.copy()
df.rename({
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'branches': 'Branch',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = df[['Repo', 'Commit', 'Branch', 'Type', 'Path', 'Size', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.format({
'Type': self._format_file_type,
'Size': self._format_size,
'Commit': self._format_hash,
'Branch': ', '.join,
'Committed': self._format_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_pipelines(self, pipelines: WildcardFilter = '*') -> PrettyTable:
df = super().list_pipelines(pipelines=pipelines)
dfr = df.copy()
df['sort_key'] = df.index.map(self._calc_pipeline_sort_key(df['input_repos'].to_dict()))
df.sort_values('sort_key', inplace=True)
df.rename({
'pipeline': 'Pipeline',
'state': 'State',
'cron_spec': 'Cron',
'cron_prev_tick': 'Last Tick',
'cron_next_tick': 'Next Tick',
'input': 'Input',
'output_branch': 'Output',
'datum_tries': 'Tries',
'created': 'Created',
}, axis=1, inplace=True)
df.loc[df['jobs_running'] > 0, 'State'] = 'job running'
now = datetime.now(self.user_timezone)
df['Next Tick In'] = (now - df['Next Tick']).dt.total_seconds() * -1
df['Parallelism'] = ''
df.loc[df['parallelism_constant'] > 0, 'Parallelism'] = \
_fa('hashtag') + df['parallelism_constant'].astype(str)
df.loc[df['parallelism_coefficient'] > 0, 'Parallelism'] = \
_fa('asterisk') + df['parallelism_coefficient'].astype(str)
df['Jobs'] = \
'<span style="color: green">' + df['jobs_success'].astype(str) + '</span>' + \
np.where(df['jobs_failure'] > 0, ' + <span style="color: red">' + df['jobs_failure'].astype(str) + '</span>', '')
styler = df[['Pipeline', 'State', 'Cron', 'Next Tick In', 'Input', 'Output', 'Parallelism', 'Jobs', 'Created']].style \
.apply(self._style_pipeline_state, subset=['State']) \
.format({
'State': self._format_pipeline_state,
'Cron': self._format_cron_spec,
'Next Tick In': self._format_duration,
'Created': self._format_datetime,
}) \
.set_properties(subset=['Input'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_jobs(self, pipelines: WildcardFilter = '*', n: int = 20, hide_null_jobs: bool = True) -> PrettyTable:
df = super().list_jobs(pipelines=pipelines, n=n, hide_null_jobs=hide_null_jobs)
dfr = df.copy()
df.rename({
'job': 'Job',
'pipeline': 'Pipeline',
'state': 'State',
'started': 'Started',
'duration': 'Duration',
'restart': 'Restarts',
'download_bytes': 'Downloaded',
'upload_bytes': 'Uploaded',
'output_commit': 'Output Commit',
}, axis=1, inplace=True)
df['Duration'] = df['Duration'].dt.total_seconds()
df['Progress'] = \
df['progress'].fillna(0).apply(lambda x: f'{x:.0%}') + ' | ' + \
'<span style="color: green">' + df['data_processed'].astype(str) + '</span>' + \
np.where(df['data_skipped'] > 0, ' + <span style="color: purple">' + df['data_skipped'].astype(str) + '</span>', '') + \
' / <span>' + df['data_total'].astype(str) + '</span>'
styler = df[['Job', 'Pipeline', 'State', 'Started', 'Duration', 'Progress', 'Restarts', 'Downloaded', 'Uploaded', 'Output Commit']].style \
.bar(subset=['Duration'], color=BAR_COLOR, vmin=0) \
.apply(self._style_job_state, subset=['State']) \
.apply(self._style_job_progress, subset=['Progress']) \
.format({
'Job': self._format_hash,
'State': self._format_job_state,
'Started': self._format_datetime,
'Duration': self._format_duration,
'Restarts': lambda i: _fa('undo') + str(i) if i > 0 else '',
'Downloaded': self._format_size,
'Uploaded': self._format_size,
'Output Commit': self._format_hash
}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def list_datums(self, job: str) -> PrettyTable:
df = super().list_datums(job=job)
dfr = df.copy()
df.rename({
'job': 'Job',
'datum': 'Datum',
'state': 'State',
'repo': 'Repo',
'type': 'Type',
'path': 'Path',
'size_bytes': 'Size',
'commit': 'Commit',
'committed': 'Committed',
}, axis=1, inplace=True)
styler = df[['Job', 'Datum', 'State', 'Repo', 'Type', 'Path', 'Size', 'Commit', 'Committed']].style \
.bar(subset=['Size'], color=BAR_COLOR, vmin=0) \
.apply(self._style_datum_state, subset=['State']) \
.format({
'Job': self._format_hash,
'Datum': self._format_hash,
'State': self._format_datum_state,
'Type': self._format_file_type,
'Size': self._format_size,
'Commit': self._format_hash,
'Committed': self._format_datetime
}) \
.set_properties(subset=['Path'], **{'white-space': 'normal !important'}) \
.set_table_styles(self.table_styles) \
.hide_index()
return PrettyTable(styler, dfr)
def get_logs(self, pipelines: WildcardFilter = '*', datum: Optional[str] = None,
last_job_only: bool = True, user_only: bool = False, master: bool = False, tail: int = 0) -> None:
df = super().get_logs(pipelines=pipelines, last_job_only=last_job_only, user_only=user_only, master=master, tail=tail)
job = None
worker = None
for _, row in df.iterrows():
if row.job != job:
print()
cprint(f' Pipeline {row.pipeline} ' + (f'| Job {row.job} ' if row.job else ''), 'yellow', 'on_grey')
if row.worker != worker:
cprint(f' Worker {row.worker} ', 'white', 'on_grey')
color = 'grey' if row.user else 'blue'
message = row.message
if 'warning' in message.lower():
color = 'magenta'
elif 'error' in message.lower() or 'exception' in message.lower() or 'critical' in message.lower():
color = 'red'
cprint(f'[{row.ts}] {message}', color)
job = row.job
worker = row.worker
def inspect_repo(self, repo: str) -> PrettyYAML:
info = super().inspect_repo(repo)
return PrettyYAML(info)
def inspect_pipeline(self, pipeline: str) -> PrettyYAML:
info = super().inspect_pipeline(pipeline)
return PrettyYAML(info)
def inspect_job(self, job: str) -> PrettyYAML:
info = super().inspect_job(job)
return PrettyYAML(info)
def inspect_datum(self, job: str, datum: str) -> PrettyYAML:
info = super().inspect_datum(job, datum)
return PrettyYAML(info)
@staticmethod
def _calc_pipeline_sort_key(input_repos: Dict[str, List[str]]):
def get_dag_distance(p, i=0):
yield i
for d in input_repos[p]:
if d in pipelines:
yield from get_dag_distance(d, i + 1)
def get_dag_dependencies(p):
yield p
for d in input_repos[p]:
if d in pipelines:
yield from get_dag_dependencies(d)
pipelines = set(input_repos.keys())
dag_distance = {p: max(list(get_dag_distance(p))) for p in pipelines}
dag_nodes = {p: set(get_dag_dependencies(p)) for p in pipelines}
for p, nodes in dag_nodes.items():
for node in nodes:
dag_nodes[node].update(nodes)
dag_name = {p: min(nodes) for p, nodes in dag_nodes.items()}
return {p: f'{dag_name[p]}/{dag_distance[p]}' for p in pipelines}
def _format_datetime(self, d: datetime) -> str:
if | pd.isna(d) | pandas.isna |
import pandas as pd
import numpy as np
from datetime import datetime
from fbprophet import Prophet
from tqdm import tqdm
import time
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
from utils.stdout_silencer import suppress_stdout_stderr
import plotly.graph_objs as go
import plotly.offline as py
import logging
logger = logging.getLogger("fbprophet")
logger.setLevel(logging.ERROR)
def run_model(ts, name, date=datetime.now().strftime("%Y-%m-%d"), **args):
m = CovidModel(ts, name, date, **args)
return m()
def run_wrapper(args):
import logging
logger = logging.getLogger("fbprophet")
logger.setLevel(logging.ERROR)
return run_model(**args)
class CovidModel:
def __init__(
self, ts, name="default", date=datetime.now().strftime("%Y-%m-%d"), **args
):
# Model Essentials
self.name = name
self.date = date
self.ts = ts
self.train_df = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 26 06:04:34 2017
A set of functions to analyze autosal conductivity files/data
@author: <NAME>
"""
# break into two
#docstrings
# keyword argument in calibration default = worm
import csv
import numpy as np
import pandas as pd
import sys
import os
def SaltLoad(saltFile):
""" Converts a autosal salinometer output file to a Pandas Dataframe.
Input:
- saltFile (file), an unextended file containing the output file
from the autosal salinometer. Contains columns/values such
as STATION NUMBER, CAST NUMBER,SAMPLENUMBER, CONDUCTIVITY RATIO,
etc.
Ex. saltFile = '/data/salt/ssscc'
Output:
- saltDF (Pandas Dataframe),Dataframe with 15 Columns containing the input data with
appropriate column names for the data.
Usage:
>>> saltDF = SaltLoad(saltFile)
"""
f = open(saltFile, newline='')
saltF = csv.reader(f,delimiter=' ', quoting=csv.QUOTE_NONE, skipinitialspace='True')
saltArray = []
for row in saltF:
saltArray.append(row)
del saltArray[0]
header = ['STNNBR','CASTNO','SAMPNO','BathTEMP','CRavg','autosalSAMPNO',\
'Unknown','StartTime','EndTime','Attempts','Reading1','Reading2',\
'Reading3', 'Reading4', 'Reading5']
f.close()
# make all rows of Salt files the same length as header
for row in saltArray:
if len(row) < len(header):
row.extend([np.NaN]*(len(header)-len(row)))
saltArray = np.array(saltArray) # change to np array
saltDF = | pd.DataFrame(saltArray,columns=header) | pandas.DataFrame |
from datetime import datetime
import numpy as np
import pandas as pd
import requests
class APIHelper:
CHUNKSIZE = 100000
def __init__(self, origin_url, request_headers={}, request_data={}):
if type(origin_url) is not str:
raise Exception("URL must be a string")
else:
self.url = origin_url
if type(request_headers) is not dict:
raise Exception("Headers must be a dict")
else:
self.request_headers = request_headers
if type(request_data) is not dict:
raise Exception("Request datas must be a dict")
else:
self.request_data = request_data
def get_data(self, dataframe=False):
"""
Get data form api request. Returns a JSON or a Pandas Dataframe in case of need by user
"""
response = requests.get(
self.url, headers=self.request_headers, data=self.request_data
)
df_list = list()
if response.status_code == 200:
if dataframe:
df_list.extend(response.json())
df = | pd.DataFrame(df_list) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 31 13:31:13 2019
@author: mehrdad
"""
import pandas as pd
import numpy as np
import tslib.trip_detection
# Compute the difference between observed trips and computed trips ----------------------
# Any mode to any mode
def compute_observed_vs_computed_diffs(observed_, computed_):
M1 = pd.merge(observed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
computed_[['duration_in_min','distance','od_distance2','emission',
'walk_distance', 'bike_distance', 'active_distance']],
left_index=True, right_index=True,
how='left',
suffixes=['_observed', '_alt'],
indicator=True)
#TOOD: whenever need the user-trip-plan_id column values as columns:
# ... M1.reset_index()
diff = | pd.DataFrame() | pandas.DataFrame |
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import seaborn as sns
import pandas as pd
from collections import defaultdict
import matplotlib
font = {'weight' : 'bold',
'size' : 30}
matplotlib.rc('font', **font)
matplotlib.rc('font', **font)
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# Version: June/2020
# This file implements the plots methods for bandits algorithm.
ylabel_dict = {'pe': 'probability of error',
'sc': 'sample complexity',
}
arm_name_dict = {
0: 'A',
1: 'B',
2: 'C',
3: 'D'
}
line_style_list = ['-','--','-.',':']
marker_list = ['o','s','v','^', '.', '>', '<']
line_color_list = ['C0', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
est_L_labels = ['Estimated L', 'L_at_10', 'L_at_200', 'True L']
#--------------------------------------------------------------------------------------
def plot_eva(results, eva_method, type = 'barplot', paper_flag = False, with_para = True, log_scale= False,
plot_confi_interval = False, method = 'all', exper = 'all',
title = 'Performance on simulated distributions', save_flag = True):
"""Plot method for evaluations
Parameters
-----------------------------------------------------------
results: dict
keys: 'env name + num_exper + num_rounds'
values: dict
keys: 'est_var + hyperpara' or 'bound'
values: dict
list of result
eva_method: str
options ('pe', 'sc')
type: str
options ('barplot', 'lineplot')
paper_flag: boolean
indicates whether plotting for paper.
True is for paper; False is not.
log_scale: boolean
if True, plot y axis as log scale
plot_confi_interval: boolean
if True, plot confidence interval (mu - sigma, mu + sigma))
method: string
if 'all', plot for all availble methods
otherwise, plot specified method
exper: string
if 'all', plot for general uses
otherwise, plot for specific format, e.g. 'est_L', 'hyperpara'
save_flag: boolean
True: save fig
"""
fig = plt.figure(figsize=(4 * 3, 3* len(results.keys())))
for i, name in enumerate(results.keys()):
ax = fig.add_subplot(len(results.keys()),3, i+1)
ax.set_title(title.replace('_', ' '))
ax.set_xlabel('Algorithms')
ax.set_ylabel(ylabel_dict[eva_method])
for j, subname in enumerate(results[name].keys()):
# setup label
if paper_flag:
label = subname.replace('Adp-Q', 'Adp_Q').split('-')[0]
# change presented names
if label == 'uniform_sampling':
label = 'Q-Uniform'
if label == 'batch_elimination':
label = 'Q-BS'
if label == 'Q_SAR_Simplified':
label = 'Q_SAR'
if label == 'SAR_Simplified':
label = 'SAR'
if with_para:
para = subname.split('-')[-1]
if ',' in para:
label = label + '-' + para.split(',')[0] + ']'
label = label.replace('_', '-')
else:
label = subname
if label == 'epsilon-greedy':
label = r'$\epsilon$-greedy'
if exper == 'est_L':
label = est_L_labels[j]
ax.set_title("Sensitiveness test for lower bound of hazard rate")
elif exper == 'hyperpara':
label = label.split(']')[0] + ']'
if method == 'all' or (method !='all' and subname == method):
mean = np.mean(results[name][subname])
sigma = 0.1 * np.std(results[name][subname])
if type == 'barplot':
if eva_method == 'pe':
width = 0.8
else:
width = 0.6
ax.bar([label], mean, width=width, yerr = sigma)
plt.xticks(rotation=90)
if log_scale:
ax.set_yscale('log')
if save_flag:
file_name = '../plots/' + title + '.pdf'
fig.savefig(file_name, bbox_inches='tight')
def plot_eva_budget(results, eva_method, type = 'lineplot', paper_flag = False, with_para = True, log_scale= False,
plot_confi_interval = False, method = 'all', exper = 'all',
title = 'Performance on simulated distributions', save_flag = True):
"""Plot method for evaluations
Parameters
-----------------------------------------------------------
results: dict
keys: 'env name + num_exper + num_rounds'
values: dict
keys: 'est_var + hyperpara' or 'bound'
values: dict
list of result
eva_method: str
options ('pe', 'sc')
type: str
options ('barplot', 'lineplot')
paper_flag: boolean
indicates whether plotting for paper.
True is for paper; False is not.
log_scale: boolean
if True, plot y axis as log scale
plot_confi_interval: boolean
if True, plot confidence interval (mu - sigma, mu + sigma))
method: string
if 'all', plot for all availble methods
otherwise, plot specified method
exper: string
if 'all', plot for general uses
otherwise, plot for specific format, e.g. 'est_L', 'hyperpara'
save_flag: boolean
True: save fig
"""
plt.figure(figsize=(5, 4))
for i, name in enumerate(results.keys()):
# ax = fig.add_subplot(len(results.keys()),3, i+1)
# ax.set_title(title.replace('_', ' '))
# ax.set_xlabel('Algorithms')
# ax.set_ylabel(ylabel_dict[eva_method])
mean_dict = defaultdict(list) # key: policy; value: list of mean of rewards
std_dict = defaultdict(list) # key: policy; value: list of std of rewards
budget_dict = defaultdict(list) # key: policy; value: list of budget
plot_df = | pd.DataFrame(columns=['Budget', 'Probability of Error', 'std', 'Policy']) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#!/usr/bin/env python3
import pandas as pd
import sys
import numpy as np
import os
def print_full(x):
pd.set_option('display.max_rows', len(x))
print(x)
pd.reset_option('display.max_rows')
sys.path.append("/Users/tkaiser2/bin")
sys.path.append("/home/tkaiser2/bin")
from tymer import *
bins=np.zeros(10)
upper=[1,2,4,8,12,18,24,30,36,5000]
lower=[0,1,2,4,8,12,18,24,30,36]
bins=np.zeros(len(upper))
tymer(["-i","start"])
flist=open('zips','r')
people=flist.readlines()
#people=['jjenkins']
#for infile in sys.stdin:
tmin=10./3600
w=0.0
tj=0
infile=people[0].strip()
outfile=infile+"_jobs"
overs=pd.read_pickle(infile+".zip")
overs['who']=infile
overs=overs[0:0]
print(overs)
for infile in people:
infile=infile.strip()
outfile=infile+"_jobs"
#if os.path.exists(outfile):
# os.remove(outfile)
try:
jobs= | pd.read_pickle(infile+".zip") | pandas.read_pickle |
#! -*- coding:utf-8 -*-
import os
import re
import gc
import sys
import json
import codecs
import random
import warnings
import numpy as np
import pandas as pd
from tqdm import tqdm
from random import choice
import tensorflow as tf
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import f1_score, accuracy_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
import keras.backend as K
from keras.layers import *
from keras.callbacks import *
from keras.models import Model
from keras.optimizers import Adam
from keras.initializers import glorot_uniform
from keras_bert import load_trained_model_from_checkpoint, Tokenizer
tqdm.pandas()
seed = 2019
random.seed(seed)
tf.set_random_seed(seed)
np.random.seed(seed)
warnings.filterwarnings('ignore')
################################################################
data_path = '../../dataSet/'
train = pd.read_csv(data_path + 'Round2_train.csv', encoding='utf-8')
train2= pd.read_csv(data_path + 'Train_Data.csv', encoding='utf-8')
train=pd.concat([train, train2], axis=0, sort=True)
test = | pd.read_csv(data_path + 'round2_test.csv', encoding='utf-8') | pandas.read_csv |
import pandas as pd
import instances.dinamizators.dinamizators as din
import math
def simplest_test():
'''
Test if the dinamizators are running
'''
df = (
pd.read_pickle('./instances/analysis/df_requests.zip')
.reset_index()
)
din.dinamize_as_berbeglia(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5,
60)
din.dinamize_as_pureza_laporte(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.pickup_lower_tw,
df.pickup_upper_tw,
0)
din.dinamize_as_pankratz(df.depot_location_x,
df.depot_location_y,
df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_upper_tw,
df.delivery_upper_tw,
df.pickup_service_time,
0.5)
din.dinamize_as_fabri_recht(df.pickup_location_x_coord,
df.pickup_location_y_coord,
df.delivery_location_x_coord,
df.delivery_location_y_coord,
df.pickup_lower_tw,
df.delivery_upper_tw)
def test_calculate_travel_time():
pickup_location_x_coord = -1
pickup_location_y_coord = -1
delivery_location_x_coord = 1
delivery_location_y_coord = 1
expected_travel_time = math.ceil(math.sqrt(2) + math.sqrt(2))
calculated_travel_time = (
din.calculate_travel_time(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord)
)
assert (expected_travel_time == calculated_travel_time)
def test_series_elementwise_max():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_max = pd.Series([3, 2, 3])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all()
def test_dataframe_elementwise_max():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_max = pd.DataFrame([[3, 2, 3], [3, 2, 3]])
calculated_max = din.elementwise_max(x, y)
assert (expected_max == calculated_max).all().all()
def test_series_elementwise_min():
x = pd.Series([1, 2, 3])
y = pd.Series([3, 2, 1])
expected_min = pd.Series([1, 2, 1])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all()
def test_dataframe_elementwise_min():
x = pd.DataFrame([[1, 2, 3], [3, 2, 1]])
y = pd.DataFrame([[3, 2, 1], [1, 2, 3]])
expected_min = pd.DataFrame([[1, 2, 1], [1, 2, 1]])
calculated_min = din.elementwise_min(x, y)
assert (expected_min == calculated_min).all().all()
def test_dinamize_as_berbeglia():
pickup_location_x_coord = pd.Series([1])
pickup_location_y_coord = pd.Series([1])
delivery_location_x_coord = pd.Series([-1])
delivery_location_y_coord = pd.Series([-1])
pickup_upper_tw = pd.Series([10.0])
delivery_upper_tw = pd.Series([12.0])
pickup_service_time = pd.Series([1.0])
alpha = 0
beta = 1
# tempo esperado usando a equação de dinamização de berbeglia
expected_arrival_time = pd.Series([7])
calculated_arrival_time = (
din.dinamize_as_berbeglia(
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord,
pickup_upper_tw,
delivery_upper_tw,
pickup_service_time,
alpha,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_pureza_laporte():
depot_location_x = pd.Series([0])
depot_location_y = pd.Series([0])
pickup_location_x_coord = pd.Series([1])
pickup_location_y_coord = pd.Series([1])
pickup_lower_tw = pd.Series([2])
pickup_upper_tw = pd.Series([10])
beta = 1
# tempo esperado usando a equação de dinamização de pureza e laporte
expected_arrival_time = 2
calculated_arrival_time = (
din.dinamize_as_pureza_laporte(
depot_location_x,
depot_location_y,
pickup_location_x_coord,
pickup_location_y_coord,
pickup_lower_tw,
pickup_upper_tw,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_pankratz():
depot_location_x = pd.Series([0])
depot_location_y = pd.Series([0])
pickup_location_x_coord = pd.Series([-1])
pickup_location_y_coord = pd.Series([-1])
delivery_location_x_coord = pd.Series([1])
delivery_location_y_coord = pd.Series([1])
pickup_upper_tw = pd.Series([10])
delivery_upper_tw = pd.Series([20])
pickup_service_time = pd.Series([1])
beta = 0.6
# tempo esperado usando a equação de dinamização de pankratz e arredondado
# para o próximo inteiro
expected_arrival_time = 5
calculated_arrival_time = (
din.dinamize_as_pankratz(
depot_location_x,
depot_location_y,
pickup_location_x_coord,
pickup_location_y_coord,
delivery_location_x_coord,
delivery_location_y_coord,
pickup_upper_tw,
delivery_upper_tw,
pickup_service_time,
beta
)
)
assert (expected_arrival_time == calculated_arrival_time).all()
def test_dinamize_as_fabri_recht():
pickup_location_x_coord = | pd.Series([-1]) | pandas.Series |
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler
def scal():
dataset = pd.read_csv('../train_cuting/train_cutting2_lstm_mean.csv')
dataset['Timestamp'] = pd.to_datetime(dataset['Timestamp'])
dataset = dataset.set_index('Timestamp')
dataset.index.name = 'date'
scaler = MinMaxScaler(feature_range=(0, 1))
values = dataset['Value']
values = values.values
values = values.reshape(-1, 1)
scaler.fit_transform(values)
return scaler
def data_process_lstm(path1=None, name1=None, path2=None, name2=None, scaler=None):
if path1 == None:
dataset = pd.read_csv(name1)
else:
dataset = pd.read_csv(path1 + '/' + name1)
dataset['Timestamp'] = | pd.to_datetime(dataset['Timestamp']) | pandas.to_datetime |
####
#### Feb 22, 2022
####
"""
After creating the first 250 eval/train set
there are inconsistencies between NASA/Landsat
labels and Forecast/Sentinel labels from experts.
Here we are.
"""
import csv
import numpy as np
import pandas as pd
import datetime
from datetime import date
import time
import scipy
import scipy.signal
import os, os.path
from patsy import cr
# from pprint import pprint
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sb
from pandas.plotting import register_matplotlib_converters
from matplotlib.dates import ConciseDateFormatter
import matplotlib.dates as mdates
from datetime import datetime
register_matplotlib_converters()
import sys
start_time = time.time()
####################################################################################
###
### Aeolus Core path
###
####################################################################################
sys.path.append('/home/hnoorazar/NASA/')
import NASA_core as nc
import NASA_plot_core as ncp
sys.path.append('/home/hnoorazar/remote_sensing_codes/')
import remote_sensing_core as rc
import remote_sensing_plot_core as rcp
####################################################################################
###
### Parameters
###
####################################################################################
county = sys.argv[1]
NDVI_ratio_cut = 0.3
print (county)
####################################################################################
###
### Aeolus Directories
###
####################################################################################
SF_dataPart_dir = "/data/hydro/users/Hossein/NASA/000_shapefile_data_part/"
param_dir = "/data/hydro/users/Hossein/NASA/0000_parameters/"
NASA_raw_dir = "/data/hydro/users/Hossein/NASA/01_raw_GEE/"
NASA_data_dir = "/data/hydro/users/Hossein/NASA/05_SG_TS/"
SOS_plot_dir = "/data/hydro/users/Hossein/NASA/08_Sentinel_landsat_plots/"
print ("_________________________________________________________")
print ("data dir is: " + NASA_data_dir)
print ("_________________________________________________________")
####################################################################################
###
### Read data
###
####################################################################################
if county == "Monterey2014":
raw_names = ["L7_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv",
"L8_T1C2L2_Scaled_Monterey2014_2013-01-01_2016-01-01.csv"]
if county == "AdamBenton2016":
raw_names = ["L7_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv",
"L8_T1C2L2_Scaled_AdamBenton2016_2015-01-01_2017-10-14.csv"]
elif county == "FranklinYakima2018":
raw_names = ["L7_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv",
"L8_T1C2L2_Scaled_FranklinYakima2018_2017-01-01_2019-10-14.csv"]
elif county == "Grant2017":
raw_names = ["L7_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv",
"L8_T1C2L2_Scaled_Grant2017_2016-01-01_2018-10-14.csv"]
elif county == "Walla2015":
raw_names = ["L7_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv",
"L8_T1C2L2_Scaled_Walla2015_2014-01-01_2016-12-31.csv"]
# print ("line 101")
SF_data_name = county + ".csv"
SG_df_NDVI = pd.read_csv(NASA_data_dir + "SG_" + county + "_NDVI_JFD.csv")
SG_df_EVI = pd.read_csv(NASA_data_dir + "SG_" + county + "_EVI_JFD.csv")
eval_tb = pd.read_csv(param_dir + "evaluation_set.csv")
if county == "AdamBenton2016":
eval_tb = eval_tb[eval_tb.county.isin(["Adams", "Benton"])]
elif county == "FranklinYakima2018":
eval_tb = eval_tb[eval_tb.county.isin(["Franklin", "Yakima"])]
elif county == "Grant2017":
eval_tb = eval_tb[eval_tb.county == "Grant"]
elif county == "Walla2015":
eval_tb = eval_tb[eval_tb.county == "Walla Walla"]
# convert the strings to datetime format
SG_df_NDVI['human_system_start_time'] = pd.to_datetime(SG_df_NDVI['human_system_start_time'])
SG_df_EVI['human_system_start_time'] = pd.to_datetime(SG_df_EVI['human_system_start_time'])
# Monterays ID will be read as integer, convert to string
SG_df_EVI["ID"] = SG_df_EVI["ID"].astype(str)
SG_df_NDVI["ID"] = SG_df_NDVI["ID"].astype(str)
"""
Read and Clean the damn raw data
"""
L7 = pd.read_csv(NASA_raw_dir + raw_names[0], low_memory=False)
L8 = pd.read_csv(NASA_raw_dir + raw_names[1], low_memory=False)
NASA_raw_df = pd.concat([L7, L8])
NASA_raw_df["ID"] = NASA_raw_df["ID"].astype(str)
del (L7, L8)
"""
Plots should be exact. Therefore, we need to filter by
last survey year, toss out NASS, and we are sticking to irrigated
fields for now.
"""
SF_data = | pd.read_csv(SF_dataPart_dir + SF_data_name) | pandas.read_csv |
import six
from sklearn.base import TransformerMixin
import pandas as pd
import numpy as np
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing import Imputer
from math import ceil, floor
from builtins import dict
from functools import reduce
class NumericCast(TransformerMixin):
"""
Implementation of pandas friendly numeric cast
"""
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
X_cast = X.apply(lambda x: pd.to_numeric(x, errors='coerce'))
return X_cast
class StringCast(TransformerMixin):
'''
Implementation of pandas friendly string cast
'''
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X):
Xz = X.astype(str)
Xz = Xz.replace("nan", np.NaN)
return Xz
class DFReplace(TransformerMixin):
'''
Refer https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.replace.html
'''
def __init__(self, to_replace=None, value=None, regex=False):
self.to_replace = to_replace
self.value = value
self.inplace = False
# self.limit = limit
self.regex = regex
self.method = 'pad'
def fit(self, X, y=None):
# Do Nothing
return self
def transform(self, X):
X_replaced = X.replace(to_replace=self.to_replace, value=self.value, inplace=self.inplace,
# limit=self.limit,
regex=self.regex, method=self.method)
return X_replaced
def _Impute(value, S):
return {
'mean': S.mean(),
'median': S.median(),
'most_frequent': S.mode()[0]
}[value]
class DFMissingNum(TransformerMixin):
'''
Replaces missing values by input value or method.Below are the methods available.
'mean': replace missing values using the mean.
'median': replace missing values using the median
'most_frequent': replace missing values using the mode
'backfill' or 'bfill': use NEXT valid observation to fill gap.
'pad' or 'ffill': propagate last valid observation forward to next valid.
Numeric value: Replaces with the input value
Ex: repalce = ""mean"" for replacing with mean, replace = 0 for replacing with the numeric 0
Note: No quotes for numeric values
'''
def __init__(self, replace):
self.replace = replace
self.imp = None
self.statistics_ = None
def fit(self, X, y=None):
if isinstance(self.replace, dict):
for key, value in six.iteritems(self.replace):
if value in ['mean', 'median', 'most_frequent']:
self.replace[key] = _Impute(value=value, S=X[key])
elif self.replace in ['mean', 'median', 'most_frequent']:
self.imp = DFImputer(strategy=self.replace)
self.imp.fit(X)
self.statistics_ = pd.Series(self.imp.statistics_, index=X.columns)
return self
def transform(self, X):
if self.replace in ['mean', 'median', 'most_frequent']:
Ximp = self.imp.transform(X)
X_replaced = | pd.DataFrame(Ximp, index=X.index, columns=X.columns) | pandas.DataFrame |
import gdax
import pandas as pd
import numpy as np
import time
import datetime
class GdaxClient:
def __init__(self):
self.public_client = gdax.PublicClient()
self.max_dataframe_size = 300
self.req_per_sec = 2
def get_historical_data(self, begin, end, granularity = 900, pair = 'ETH-USD'):
"""
Use UTC time
"""
if(end > datetime.datetime.utcnow()):
raise ValueError("End date can't be set in the future")
dt = datetime.timedelta(minutes=granularity/60 * self.max_dataframe_size)
current_time = begin
df_year = pd.DataFrame()
# These transformations must be done due to limitations of the gdax api
# If the time is not rounded down to the nearest granularity value,
# the api returns more data than needed (eg. 351 rows for a difference between end and start of the granularity)
begin = self._round_time(begin, granularity)
end = self._round_time(end, granularity)
while(current_time < end):
if(current_time + dt < end):
data = self.public_client.get_product_historic_rates(pair,
start = current_time,
end = current_time + dt,
granularity=granularity)
current_time += dt
elif(current_time + dt >= end):
data = self.public_client.get_product_historic_rates(pair,
start = current_time,
end = end,
granularity=granularity)
current_time = end
if(data and not isinstance(data,dict)):
df = pd.DataFrame(data, columns=['time','low','high','open', 'close', 'volume'])
df.time = pd.to_datetime(df['time'], unit='s')
df=df.iloc[::-1].reset_index(drop=True)
df_year = df_year.append(df)
time.sleep(1/self.req_per_sec)
df_year = df_year.reset_index(drop=True)
return df_year
def get_market_price(self):
public_client = gdax.PublicClient()
data = public_client.get_product_historic_rates('ETH-EUR', granularity=60)
df = | pd.DataFrame(data, columns=['time','low','high','open', 'close', 'volume']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Calculation of inhomogeneity factor for a population of stacking sequence
@author: <NAME>
"""
import sys
sys.path.append(r'C:\LAYLA')
import numpy as np
import pandas as pd
from src.CLA.lampam_functions import calc_lampam
# Creation of a table of stacking sequences
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = ss
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 2)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 3)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 45, 90, -45, 0, 45, 90, -45])
ss = np.matlib.repmat(ss, 1, 4)
ss = np.ravel(ss)
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 0, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 0, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 90, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 90, 90, 90, 90, 90, 90, 90])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 90, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([90, 90, 90, 90, 90, 90, 90, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([45, 45, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([-45, -45, 0, 0, 0, 0, 0, 0])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, 45, 45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
ss = np.array([0, 0, 0, 0, 0, 0, -45, -45])
ss = np.hstack((ss, np.flip(ss, axis=0)))
ss = np.array(ss, dtype=str)
ss = ' '.join(ss)
sst = np.vstack((sst, ss))
Pop = | pd.DataFrame() | pandas.DataFrame |
# Import standard python libraries.
import pandas as pd
import numpy as np
import pathlib
import warnings
import sys
# Import the functions used throughout this project from the function dictionary library file
fileDir = pathlib.Path(__file__).parents[2]
code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
sys.path.append(str(code_library_folder))
from coal_data_processing_functions import state_abbreviations, generic_coal_rank, lower_case_data_keys
from statistical_functions import ecdf, weighted_ecdf
from statistics import mean
def weighted_coal_ecdf(coal):
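    """
    Build production-weighted empirical CDFs of coal quality for one coal type.

    Parameters
    ----------
    coal : str
        Coal name, e.g. 'Appalachian Low Sulfur', 'Illinois #6', 'Wyodak'.
        Each branch below filters COALQUAL to the matching region, bed, or rank.

    Notes
    -----
    County-mean concentrations of Cl, Se, B, Br, Pb, As, and Hg, along with heat
    content and sulfur, are weighted by county-level coal sales, so counties that
    produce more coal contribute more to each distribution. Each branch also
    assigns a gross heat rate (Btu/kWh) and an FGD water treatment rate (m^3/kWh)
    for plants firing that coal.
    """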
    warnings.filterwarnings('ignore')  # assumed intent: silence pandas/numpy runtime warnings
    # Read in (1) the COALQUAL upper-level sample data and (2) the amount of coal
    # mining done in each county.
coalqual_filename = fileDir / 'Data' / 'COALQUAL Data' / 'CQ_upper_level.csv'
COALQUAL = pd.read_csv(coalqual_filename, header=0,
names=['Sample_ID', 'State', 'County', 'Province', 'Region', 'Field', 'Formation', 'Bed',
'Apparent_Rank', 'Sulfur', 'Heat', 'Arsenic', 'Boron', 'Bromine', 'Chlorides',
'Mercury',
'Lead', 'Selenium'], usecols=[0, 1, 2, 5, 6, 7, 9, 11, 28, 84, 87, 147, 151, 159, 165,
191, 219, 239])
mining_volume_filename = fileDir / 'Intermediate' / 'Coal Mining By Counties.csv'
Mining_Volume = pd.read_csv(mining_volume_filename, header=0, names=['Coal_Sales', 'FIPS_Code_State',
'County_Name_State_Normal_Capitalization'],
usecols=[1, 2, 8])
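    # Coal_Sales holds the coal production volume for each county; it is passed to
    # weighted_ecdf() below as the weight attached to that county's mean sample values.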
# Drop COALQUAL anthracite and samples with blank apparent rank.
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Anthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Semianthracite']
COALQUAL = COALQUAL[COALQUAL.Apparent_Rank != 'Rock']
COALQUAL = COALQUAL.dropna(subset=['Apparent_Rank'])
# Classify apparent ranks into broad categories.
COALQUAL['Rank'] = generic_coal_rank(COALQUAL.Apparent_Rank)
# Process the columns that will serve as keys for the data merging.
COALQUAL['State_Abbreviation'] = state_abbreviations(COALQUAL.State)
County_Name_State_Normal_Capitalization = COALQUAL['County'] + ' County, ' + COALQUAL['State_Abbreviation']
COALQUAL['County_Name_State'] = lower_case_data_keys(County_Name_State_Normal_Capitalization)
Mining_Volume['County_Name_State'] = lower_case_data_keys(Mining_Volume['County_Name_State_Normal_Capitalization'])
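    # The lower-cased "<county> County, <state>" string is the key used to merge
    # COALQUAL county means with the county-level mining volumes.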
    # Keep only samples with finite chloride / bromine values for the nationwide distributions.
    COALQUAL_all_samples_Cl = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
    COALQUAL_all_samples_Br = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
COALQUAL_all_samples_Cl = COALQUAL_all_samples_Cl.groupby(['County_Name_State']).mean()
COALQUAL_all_samples_Cl['County_Name_State'] = COALQUAL_all_samples_Cl.index
COALQUAL_all_samples_Cl = pd.merge(COALQUAL_all_samples_Cl, Mining_Volume, on='County_Name_State')
COALQUAL_all_samples_Br = COALQUAL_all_samples_Br.groupby(['County_Name_State']).mean()
COALQUAL_all_samples_Br['County_Name_State'] = COALQUAL_all_samples_Br.index
COALQUAL_all_samples_Br = pd.merge(COALQUAL_all_samples_Br, Mining_Volume, on='County_Name_State')
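    # weighted_ecdf(values, weights) is imported from statistical_functions; as used here
    # it returns the sorted quantile values (qe_*) and their cumulative probabilities
    # (pe_*), with each county's step height proportional to its Coal_Sales rather than
    # to its sample count. Sketch of the assumed behaviour:
    #   weighted_ecdf(pd.Series([0.5, 2.0]), pd.Series([1, 3]))
    # would place the first step at probability ~0.25 and the second at 1.0.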
qe_Cl_All, pe_Cl_All = weighted_ecdf(COALQUAL_all_samples_Cl['Chlorides'], COALQUAL_all_samples_Cl['Coal_Sales'])
qe_Br_All, pe_Br_All = weighted_ecdf(COALQUAL_all_samples_Br['Bromine'], COALQUAL_all_samples_Br['Coal_Sales'])
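    # These nationwide, production-weighted Cl and Br distributions act as fallbacks for
    # coals whose filtered COALQUAL subset lacks chloride or bromine measurements
    # (see the qe_Cl_All / qe_Br_All assignments in the branches below).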
# For Appalachian Low Sulfur Coal
if coal == 'Appalachian Low Sulfur':
COALQUAL = COALQUAL[
(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN')
| (COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]
# USGS Circular 891 defines "low sulfur coal" as less than 1% total sulfur (https://pubs.usgs.gov/circ/c891/glossary.htm).
# This is identical to the standard used by the EIA.
COALQUAL = COALQUAL[COALQUAL['Sulfur'] < 1]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
        chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
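        # Plant-level parameters assigned per coal: gross heat rate and the
        # flue-gas desulfurization (FGD) water treatment rate.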
gross_heat_rate = 8188 # Btu/kWh
FGD_water_treatment = 2.14e-4 # m^3/kWh
# For Appalachian Medium Sulfur Coal
elif coal == 'Appalachian Med Sulfur':
COALQUAL = COALQUAL[
(COALQUAL['Region'] == 'SOUTHERN APPALACHIAN') | (COALQUAL['Region'] == 'CENTRAL APPALACHIAN') | (
COALQUAL['Region'] == 'NORTHERN APPALACHIAN')]
COALQUAL = COALQUAL[(COALQUAL['Sulfur'] > 1) & (COALQUAL['Sulfur'] < 3)]
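        # The 1-3% total sulfur window corresponds to the 'medium sulfur' class in the
        # same USGS Circular 891 scheme cited for the low-sulfur cutoff above.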
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8210 # Btu/kWh
FGD_water_treatment = 2.20e-4 # m^3/kWh
# For Beulah-Zap Bed Coal
elif coal == 'Beulah-Zap':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'BEULAH-ZAP')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
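        # No bed-specific bromine distribution is built for Beulah-Zap, so the
        # nationwide production-weighted Br ECDF is used instead.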
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8680 # Btu/kWh
FGD_water_treatment = 2.36e-4 # m^3/kWh
# For Illinois #6 Coal
elif coal == 'Illinois #6':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'HERRIN NO 6')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
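        # No bed-specific chloride distribution is built for Illinois #6, so the
        # nationwide production-weighted Cl ECDF is used instead.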
qe_Cl = qe_Cl_All
pe_Cl = pe_Cl_All
gross_heat_rate = (8279 + 8319) / 2 # Btu/kWh
FGD_water_treatment = 2.22e-4 # m^3/kWh
# For ND Lignite Coal
elif coal == 'ND Lignite':
COALQUAL = COALQUAL[(COALQUAL['State'] == 'North Dakota') & (COALQUAL['Rank'] == 'LIGNITE')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8865 # Btu/kWh
FGD_water_treatment = 2.39e-4 # m^3/kWh
# For Pocahontas #3 Seam Coal
elif coal == "Pocahontas #3":
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'POCAHONTAS NO 3')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8099 # Btu/kWh
FGD_water_treatment = 2.19e-4 # m^3/kWh
# For Upper Freeport Coal
elif coal == 'Upper Freeport':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'UPPER FREEPORT')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8104 # Btu/kWh
FGD_water_treatment = 2.11e-4 # m^3/kWh
# For WPC Utah Coal
elif coal == 'WPC Utah':
COALQUAL = COALQUAL[(COALQUAL['Region'] == 'SOUTHWESTERN UTAH')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8347 # Btu/kWh
FGD_water_treatment = 2.42e-4 # m^3/kWh
# For Wyodak Coal
elif coal == 'Wyodak':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'WYODAK')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8192 # Btu/kWh
FGD_water_treatment = 1.66e-4 # m^3/kWh
# For Wyodak-Anderson Coal
    elif coal == 'Wyodak-Anderson':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'WYODAK-ANDERSON')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8585 # Btu/kWh
FGD_water_treatment = 2.32e-4 # m^3/kWh
# For Wyoming PRB Coal
elif coal == 'Wyoming PRB':
COALQUAL = COALQUAL[(COALQUAL['Region'] == 'POWDER RIVER') & (COALQUAL['State'] == 'Wyoming')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8588 # Btu/kWh
FGD_water_treatment = 2.28e-4 # m^3/kWh
# For Bituminous Coal
elif coal == 'Bituminous':
COALQUAL = COALQUAL[(COALQUAL['Rank'] == 'BITUMINOUS')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8188 # Btu/kWh
FGD_water_treatment = 2.14e-4 # m^3/kWh
# For Subbituminous Coal
elif coal == 'Subbituminous':
COALQUAL = COALQUAL[(COALQUAL['Rank'] == 'SUBBITUMINOUS')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Br, pe_Br = weighted_ecdf(bromine['Bromine'], bromine['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
gross_heat_rate = 8588 # Btu/kWh
FGD_water_treatment = 2.28e-4 # m^3/kWh
# For Lignite Coal
elif coal == 'Lignite':
COALQUAL = COALQUAL[(COALQUAL['Rank'] == 'LIGNITE')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
selenium = COALQUAL[np.isfinite(COALQUAL['Selenium'])]
boron = COALQUAL[np.isfinite(COALQUAL['Boron'])]
lead = COALQUAL[np.isfinite(COALQUAL['Lead'])]
arsenic = COALQUAL[np.isfinite(COALQUAL['Arsenic'])]
mercury = COALQUAL[np.isfinite(COALQUAL['Mercury'])]
heat = COALQUAL[np.isfinite(COALQUAL['Heat'])]
sulfur = COALQUAL[np.isfinite(COALQUAL['Sulfur'])]
qe_Cl, pe_Cl = weighted_ecdf(chlorine['Chlorides'], chlorine['Coal_Sales'])
qe_Se, pe_Se = weighted_ecdf(selenium['Selenium'], selenium['Coal_Sales'])
qe_B, pe_B = weighted_ecdf(boron['Boron'], boron['Coal_Sales'])
qe_Pb, pe_Pb = weighted_ecdf(lead['Lead'], lead['Coal_Sales'])
qe_As, pe_As = weighted_ecdf(arsenic['Arsenic'], arsenic['Coal_Sales'])
qe_Hg, pe_Hg = weighted_ecdf(mercury['Mercury'], mercury['Coal_Sales'])
qe_Heat, pe_Heat = weighted_ecdf(heat['Heat'], heat['Coal_Sales'])
qe_Sulfur, pe_Sulfur = weighted_ecdf(sulfur['Sulfur'], sulfur['Coal_Sales'])
qe_Br = qe_Br_All
pe_Br = pe_Br_All
gross_heat_rate = 8865 # Btu/kWh
FGD_water_treatment = 2.39e-4 # m^3/kWh
# For Quality Guidelines for Energy System Studies - Illinois #6 coal (The bituminous coal used for the 550 MW
# Bituminous Baseline)
elif coal == 'QGESS Bituminous':
COALQUAL = COALQUAL[(COALQUAL['Bed'] == 'HERRIN NO 6')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
qe_Cl = [1671, 1671]
pe_Cl = [0, 1]
qe_Se = [1.9, 1.9]
pe_Se = [0, 1]
qe_B = [90, 90]
pe_B = [0, 1]
qe_Br = [np.average(bromine['Bromine'], weights=bromine['Coal_Sales']),
np.average(bromine['Bromine'], weights=bromine['Coal_Sales'])]
pe_Br = [0, 1]
qe_Pb = [24, 24]
pe_Pb = [0, 1]
qe_As = [7.5, 7.5]
pe_As = [0, 1]
qe_Hg = [0.09, 0.09]
pe_Hg = [0, 1]
qe_Heat = [11666, 11666]
pe_Heat = [0, 1]
qe_Sulfur = [2.51, 2.51]
pe_Sulfur = [0, 1]
gross_heat_rate = (8279+8319)/2 #Btu/kWh
FGD_water_treatment = 2.22e-4 #m^3/kWh
    # For Quality Guidelines for Energy System Studies - Powder River Basin coal (the subbituminous coal used for the
    # 550 MW Subbituminous Baseline)
elif coal == 'QGESS Subbituminous':
COALQUAL = COALQUAL[(COALQUAL['Region'] == 'POWDER RIVER') & (COALQUAL['State'] == 'Wyoming')]
COALQUAL = COALQUAL.groupby(['County_Name_State']).mean()
COALQUAL['County_Name_State'] = COALQUAL.index
COALQUAL = pd.merge(COALQUAL, Mining_Volume, on='County_Name_State')
chlorine = COALQUAL[np.isfinite(COALQUAL['Chlorides'])]
bromine = COALQUAL[np.isfinite(COALQUAL['Bromine'])]
qe_Cl = [np.average(chlorine['Chlorides'], weights=chlorine['Coal_Sales']),
np.average(chlorine['Chlorides'], weights=chlorine['Coal_Sales'])]
pe_Cl = [0, 1]
qe_Se = [0.3, 0.3]
pe_Se = [0, 1]
qe_B = [43, 43]
pe_B = [0, 1]
qe_Br = [np.average(bromine['Bromine'], weights=bromine['Coal_Sales']),
np.average(bromine['Bromine'], weights=bromine['Coal_Sales'])]
pe_Br = [0, 1]
qe_Pb = [5, 5]
pe_Pb = [0, 1]
qe_As = [1.5, 1.5]
pe_As = [0, 1]
qe_Hg = [0.1, 0.1]
pe_Hg = [0, 1]
qe_Heat = [8800, 8800]
pe_Heat = [0, 1]
qe_Sulfur = [0.22, 0.22]
pe_Sulfur = [0, 1]
gross_heat_rate = (8279+8319)/2 #Btu/kWh
FGD_water_treatment = 2.22e-4 #m^3/kWh
return qe_Cl, pe_Cl, qe_Se, pe_Se, qe_B, pe_B, qe_Br, pe_Br, qe_Pb, pe_Pb, qe_As, pe_As, qe_Hg, pe_Hg, qe_Heat, \
pe_Heat, qe_Sulfur, pe_Sulfur, gross_heat_rate, FGD_water_treatment
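# Illustrative helper (not part of the original module): each (qe_*, pe_*) pair
# returned above describes a weighted empirical CDF, so a Monte Carlo draw of a
# coal property can be generated by inverse-transform sampling along that curve.
def sample_from_ecdf(qe, pe, n_samples, seed=0):
    """Draw n_samples values from an ECDF given by quantiles qe and probabilities pe."""
    rng = np.random.RandomState(seed)
    u = rng.uniform(0, 1, n_samples)
    return np.interp(u, pe, qe)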
def coal_ecdf(coal):
# Read in Coal Qual Data on the Samples.
# For frozen code:
fileDir = pathlib.Path(__file__).parents[1]
samples_filename = fileDir / 'newData' / 'COALQUAL Data' / 'Coal Qual Sample Data.csv'
trace_element_filename = fileDir / 'newData' / 'COALQUAL Data' / 'Coal Qual Trace Element Data.csv'
ultimate_analysis_filename = fileDir / 'newData' / 'COALQUAL Data' / 'Coal Qual Ultimate Analysis Data.csv'
# For original python code
# fileDir = pathlib.Path(__file__).parents[2]
# code_library_folder = fileDir / 'Code' / 'function_dictionary_library'
# sys.path.append(str(code_library_folder))
#
# fileDir = pathlib.Path(__file__).parents[2]
# samples_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Sample Data.csv'
# trace_element_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Trace Element Data.csv'
# ultimate_analysis_filename = fileDir / 'Data' / 'COALQUAL Data' / 'Coal Qual Ultimate Analysis Data.csv'
# Note that we use skipfooter to not read in the search criteria column.
Samples = pd.read_csv(samples_filename, header=1,
names=['Sample_ID', 'State', 'County', 'Region', 'Field', 'Formation', 'Bed', 'Rank'],
usecols=[0, 1, 2, 6, 7, 9, 11, 27], engine='python', skipfooter=2)
Trace_Element = pd.read_csv(trace_element_filename, header=1, names=['Sample_ID', 'Arsenic', 'Boron', 'Bromine',
'Chlorides', 'Mercury', 'Lead', 'Selenium'],
usecols=[0, 23, 27, 35, 41, 67, 95, 115], engine='python', skipfooter=2)
Ultimate_Analysis = pd.read_csv(ultimate_analysis_filename, header=1, names=['Sample_ID', 'Sulfur', 'Heat'],
usecols=[0, 18, 21], engine='python', skipfooter=2)
# Merge data together
COALQUAL = pd.merge(Samples, Trace_Element, on='Sample_ID')
    COALQUAL = pd.merge(COALQUAL, Ultimate_Analysis, on='Sample_ID')
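# `weighted_ecdf` is used throughout this module but defined elsewhere in the
# code base; a minimal sketch consistent with how it is called above
# ((values, weights) -> sorted quantiles, cumulative probabilities) might be:
def weighted_ecdf_sketch(values, weights):
    values = np.asarray(values, dtype=float)
    weights = np.asarray(weights, dtype=float)
    order = np.argsort(values)
    quantiles = values[order]
    probabilities = np.cumsum(weights[order]) / np.sum(weights)
    return quantiles, probabilities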
from skimage import io
# import SpaGCN as spg
from SpaGCN2 import SpaGCN
import cv2
import numpy as np
from sklearn.decomposition import PCA
from SpaGCN2.calculate_adj import calculate_adj_matrix
import argparse
import scanpy as sc
from src.graph_func import graph_construction
from src.SEDR_train import SEDR_Train
import random, torch
import pandas as pd
import os
import shutil
import anndata
def generate_embedding_sp(anndata, pca, res, img_path,pca_opt):
# image = io.imread("img/"+sample+".png") # value
# Calculate adjacent matrix
# b = 49
random.seed(200)
torch.manual_seed(200)
np.random.seed(200)
b = 49
a = 1
x2 = anndata.obs["array_row"].tolist()
x3 = anndata.obs["array_col"].tolist()
x4 = anndata.obs["pxl_col_in_fullres"]
x5 = anndata.obs["pxl_row_in_fullres"]
if img_path != None:
image = io.imread(img_path)
# print(image)
max_row = max_col = int((2000 / anndata.uns['tissue_hires_scalef']) + 1)
x4 = x4.values * (image.shape[0] / max_row)
x4 = x4.astype(np.int)
x4 = x4.tolist()
x5 = x5.values * (image.shape[1] / max_col)
x5 = x5.astype(np.int)
x5 = x5.tolist()
adj = calculate_adj_matrix(x=x2, y=x3, x_pixel=x4, y_pixel=x5, image=image, beta=b, alpha=a,
histology=True) # histology optional
else:
x4 = x4.tolist()
x5 = x5.tolist()
adj = calculate_adj_matrix(x=x2, y=x3, x_pixel=x4, y_pixel=x5, beta=b, alpha=a,
histology=False)
# print(adj[2000].size)
# print(adj.shape)
p = 0.5
# l = spg.find_l(p=p, adj=adj, start=0.75, end=0.8, sep=0.001, tol=0.01)
l = 1.43
# res = 0.6
clf = SpaGCN()
clf.set_l(l)
# Init using louvain
# clf.train(anndata, adj,num_pcs=pca, init_spa=True, init="louvain",louvain_seed=0, res=res, tol=5e-3)
clf.train(anndata, adj, num_pcs=pca, init_spa=True, init="louvain", res=res, tol=5e-3,pca_opt = pca_opt)
y_pred, prob, z = clf.predict_with_embed()
return z
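# Hedged, self-contained demo (synthetic counts and coordinates, made-up values)
# showing the inputs generate_embedding_sp expects: an AnnData whose .obs carries
# the spot array coordinates and full-resolution pixel coordinates. Real Visium
# data is the intended input; this is illustrative only.
def _demo_generate_embedding_sp(n_spots=50, n_genes=100):
    counts = np.random.rand(n_spots, n_genes)
    adata = anndata.AnnData(counts)
    adata.obs["array_row"] = np.random.randint(0, 64, n_spots)
    adata.obs["array_col"] = np.random.randint(0, 64, n_spots)
    adata.obs["pxl_col_in_fullres"] = np.random.randint(0, 2000, n_spots)
    adata.obs["pxl_row_in_fullres"] = np.random.randint(0, 2000, n_spots)
    return generate_embedding_sp(adata, pca=20, res=0.6, img_path=None, pca_opt=True)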
def generate_embedding_sc(anndata, sample, scgnnsp_dist, scgnnsp_alpha, scgnnsp_k, scgnnsp_zdim, scgnnsp_bypassAE):
scGNNsp_folder = "scGNNsp_space/"
if not os.path.exists(scGNNsp_folder):
os.makedirs(scGNNsp_folder)
datasetName = sample+'_'+scgnnsp_zdim+'_'+scgnnsp_alpha+'_'+scgnnsp_k+'_'+scgnnsp_dist+'_logcpm'
scGNNsp_data_folder = scGNNsp_folder + datasetName + '/'
if not os.path.exists(scGNNsp_data_folder):
os.makedirs(scGNNsp_data_folder)
coords_list = [list(t) for t in zip(anndata.obs["array_row"].tolist(), anndata.obs["array_col"].tolist())]
if not os.path.exists(scGNNsp_data_folder + 'coords_array.npy'):
np.save(scGNNsp_data_folder + 'coords_array.npy', np.array(coords_list))
original_cpm_exp = anndata.X.A.T
# original_cpm_exp = pd.read_csv('scGNNsp_space/151507_logcpm_test/151507_human_brain_ex.csv', index_col=0).values
if not os.path.exists(scGNNsp_data_folder + sample + '_logcpm_expression.csv'):
pd.DataFrame(original_cpm_exp).to_csv(scGNNsp_data_folder + sample + '_logcpm_expression.csv')
os.chdir(scGNNsp_folder)
command_preprocessing = 'python -W ignore PreprocessingscGNN.py --datasetName ' + sample + '_logcpm_expression.csv --datasetDir ' + datasetName + '/ --LTMGDir ' + datasetName + '/ --filetype CSV --cellRatio 1.00 --geneSelectnum 2000 --transform None'
if not os.path.exists(datasetName + '/Use_expression.csv'):
os.system(command_preprocessing)
# python -W ignore PreprocessingscGNN.py --datasetName 151507_human_brain_ex.csv --datasetDir 151507_velocity/ --LTMGDir 151507_velocity/ --filetype CSV --cellRatio 1.00 --geneSelectnum 2000 --transform None
scgnnsp_output_folder = 'outputdir-3S-' + datasetName + '_EM1_resolution0.3_' + scgnnsp_dist + '_dummy_add_PEalpha' + scgnnsp_alpha + '_k' + scgnnsp_k +'_zdim' + scgnnsp_zdim+ '_NA/'
scgnnsp_output_embedding_csv = datasetName + '_' + scgnnsp_k + '_' + scgnnsp_dist + '_NA_dummy_add_' + scgnnsp_alpha + '_intersect_160_GridEx19_embedding.csv'
command_scgnnsp = 'python -W ignore scGNNsp.py --datasetName ' + datasetName + ' --datasetDir ./ --outputDir ' + scgnnsp_output_folder + ' --resolution 0.3 --nonsparseMode --EM-iteration 1 --useSpatial --model PAE --useGAEembedding --saveinternal --no-cuda --debugMode savePrune --saveinternal --GAEhidden2 3 --prunetype spatialGrid --PEtypeOp add --pe-type dummy'
command_scgnnsp = command_scgnnsp + " --knn-distance " + scgnnsp_dist
command_scgnnsp = command_scgnnsp + " --PEalpha " + scgnnsp_alpha
command_scgnnsp = command_scgnnsp + " --k " + scgnnsp_k
command_scgnnsp = command_scgnnsp + " --zdim " + scgnnsp_zdim
if scgnnsp_bypassAE:
scgnnsp_output_folder = 'outputdir-3S-' + datasetName + '_EM1_resolution0.3_' + scgnnsp_dist + '_dummy_add_PEalpha' + scgnnsp_alpha + '_k' + scgnnsp_k + '_NA_bypassAE/'
command_scgnnsp = 'python -W ignore scGNNsp.py --datasetName ' + datasetName + ' --datasetDir ./ --outputDir ' + scgnnsp_output_folder + ' --resolution 0.3 --nonsparseMode --EM-iteration 1 --useSpatial --model PAE --useGAEembedding --saveinternal --no-cuda --debugMode savePrune --saveinternal --GAEhidden2 3 --prunetype spatialGrid --PEtypeOp add --pe-type dummy'
command_scgnnsp = command_scgnnsp + " --knn-distance " + scgnnsp_dist
command_scgnnsp = command_scgnnsp + " --PEalpha " + scgnnsp_alpha
command_scgnnsp = command_scgnnsp + " --k " + scgnnsp_k
command_scgnnsp = command_scgnnsp + " --zdim " + scgnnsp_zdim
command_scgnnsp = command_scgnnsp + " --bypassAE"
if not os.path.exists(scgnnsp_output_folder + scgnnsp_output_embedding_csv):
os.system(command_scgnnsp)
    scgnnsp_output_embedding = pd.read_csv(scgnnsp_output_folder + scgnnsp_output_embedding_csv, index_col=0)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Input data files are available in the "../../../input/koki25ando_hostel-world-dataset/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../../../input/koki25ando_hostel-world-dataset"))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Created on Mon Sep 10 21:45:05 2018 @author: vino """
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # for visualization
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelBinarizer
from pandas.tools.plotting import scatter_matrix
from sklearn.preprocessing import Imputer
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
'''Reading data from CSV file'''
# data_set = pd.read_csv("../../../input/koki25ando_hostel-world-dataset/Hostel.csv")
data_set = pd.read_csv("../../../input/koki25ando_hostel-world-dataset/Hostel.csv")
'''Info about data set'''
print(data_set.info())
print(data_set.columns) # Gives all the column names.
print(data_set.shape) # Number of rows and columns.
'''********************Data Cleaning********************'''
corr_data_set = data_set
print(corr_data_set['City'].value_counts(), corr_data_set['rating.band'].value_counts())
'''In corr_data_set rating.band is a categorical value and we are filling the missing value of it using unknown category.'''
corr_data_set['rating.band'] = corr_data_set['rating.band'].fillna('Unknown')
'''Converting categorical value to numerical attributes using label encoder.'''
label_encoder = LabelEncoder()
for key in ['rating.band', 'City']:
print(corr_data_set[key].value_counts())
corr_data_set[key] = label_encoder.fit_transform(corr_data_set[key])
print(corr_data_set[key])
'''One hot encoding technique.'''
one_hot_encoder = OneHotEncoder()
for key in ['rating.band', 'City']:
print(corr_data_set[key].value_counts())
corr_data_set[key] = one_hot_encoder.fit_transform( (corr_data_set[key].values).reshape(-1,1))
print(corr_data_set[key])
#'''Converts categorical to label encoding and one hot encoding in a single
# shot'''
#label_binarizer = LabelBinarizer()
#for key in ['rating.band', 'City']:
# print(corr_data_set[key].value_counts())
# corr_data_set[key] = label_binarizer.fit_transform(corr_data_set[key])
# print(corr_data_set[key].value_counts())
'''Removing string from distance and making it as numerical variable for linear regression'''
print(corr_data_set['Distance'])
# Strip the 'km' suffix from every entry, then convert the column to numeric.
corr_data_set['Distance'] = corr_data_set['Distance'].str.split('km').str[0]
corr_data_set['Distance'] = pd.to_numeric(corr_data_set['Distance'], errors='ignore')
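# Hedged continuation sketch (not part of the original kernel): fit a baseline
# linear regression for hostel price using the evaluation imports above. The
# column names 'price.from' and 'summary.score' are assumptions about the
# Hostel.csv schema, used only for illustration.
model_df = corr_data_set.copy()
model_df['Distance'] = pd.to_numeric(model_df['Distance'], errors='coerce')
model_df = model_df.dropna(subset=['price.from', 'summary.score', 'Distance'])
X = model_df[['summary.score', 'Distance']]
y = model_df['price.from']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
reg = linear_model.LinearRegression()
reg.fit(X_train, y_train)
pred = reg.predict(X_test)
print('Baseline MSE:', mean_squared_error(y_test, pred))
print('Baseline R2 :', r2_score(y_test, pred))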
import mrcnn.model as modellib
import os
import sys
import cv2
import random
import numpy as np
import pandas as pd
import deeplabcut
import json
import skimage
import skimage.io
from skimage.util import img_as_ubyte, img_as_float
from skimage import morphology, measure, filters
from shutil import copyfile
from skimage.measure import regionprops
from skimage.measure import find_contours
from skimage.morphology import square, dilation
from skimage.color import rgb2gray
from .mouse import MouseDataset
from .mouse import InferenceConfig
from .shape import shapes_to_labels_masks
from multiprocessing import Pool
import shutil
import time
import errno
import ntpath
import glob
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
def onerror(function, path, exc_info):
# Handle ENOTEMPTY for rmdir
if (function is os.rmdir
and issubclass(exc_info[0], OSError)
and exc_info[1].errno == errno.ENOTEMPTY):
timeout = 0.001
while timeout < 2:
if not os.listdir(path):
return os.rmdir(path)
time.sleep(timeout)
timeout *= 2
raise
def clean_dir_safe(path):
if os.path.isdir(path):
shutil.rmtree(path, onerror=onerror)
# rmtree didn't fail, but path may still be linked if there is or was
# a handle that shares delete access. Assume the owner of the handle
# is watching for changes and will close it ASAP. So retry creating
# the directory by using a loop with an increasing timeout.
timeout = 0.001
while True:
try:
return os.mkdir(path)
except PermissionError as e:
# Getting access denied (5) when trying to create a file or
# directory means either the caller lacks access to the
# parent directory or that a file or directory with that
# name exists but is in the deleted state. Handle both cases
# the same way. Otherwise, re-raise the exception for other
# permission errors, such as a sharing violation (32).
if e.winerror != 5 or timeout >= 2:
raise
time.sleep(timeout)
timeout *= 2
def video2frames(video_dir):
"""Convert a video into frames saved in a directory named as the video name.
Args:
video_dir: path to the video
"""
cap = cv2.VideoCapture(video_dir)
nframes = int(cap.get(7))
data_dir = os.path.splitext(video_dir)[0]
frames_dir = os.path.join(data_dir, "images")
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists(frames_dir):
os.mkdir(frames_dir)
for index in range(nframes):
cap.set(1, index) # extract a particular frame
ret, frame = cap.read()
if ret:
image = img_as_ubyte(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
img_name = os.path.join(frames_dir, str(index) + ".jpg")
skimage.io.imsave(img_name, image)
return frames_dir
def background_subtraction(frames_dir, background_dir):
"""Generate foregrounds corresponding to frames
Args:
frames_dir: path to directory containing frames
background_dir: path to the background image
Returns:
components: 1D array of number of blobs in each frame.
"""
fg_dir = os.path.join(os.path.dirname(frames_dir), 'FG')
try:
os.mkdir(fg_dir)
except FileExistsError:
shutil.rmtree(fg_dir)
os.mkdir(fg_dir)
bg = img_as_float(skimage.io.imread(background_dir))
if bg.ndim == 3:
bg = rgb2gray(bg)
threshold = bg * 0.5
frames_list = os.listdir(frames_dir)
components = np.zeros(len(frames_list), dtype=int)
for frame in range(len(frames_list)):
im = img_as_float(skimage.io.imread(
os.path.join(frames_dir, str(frame) + '.jpg')))
if im.ndim == 3:
im = rgb2gray(im)
fg = (bg - im) > threshold
bw1 = morphology.remove_small_objects(fg, 1000)
bw2 = morphology.binary_closing(bw1, morphology.disk(radius=10))
bw3 = morphology.binary_opening(bw2, morphology.disk(radius=10))
label = measure.label(bw3)
num_fg = np.max(label)
masks = np.zeros([bg.shape[0], bg.shape[1], 3], dtype=np.uint8)
if num_fg == 2:
bw3_1 = label == 1
bw4_1 = morphology.binary_closing(
bw3_1, morphology.disk(radius=30))
bw5_1 = filters.median(bw4_1, morphology.disk(10))
bw3_2 = label == 2
bw4_2 = morphology.binary_closing(
bw3_2, morphology.disk(radius=30))
bw5_2 = filters.median(bw4_2, morphology.disk(10))
# masks[:, :, 0] = img_as_bool(bw5_1)
# masks[:, :, 1] = img_as_bool(bw5_2)
masks[:, :, 0] = img_as_ubyte(bw5_1)
masks[:, :, 1] = img_as_ubyte(bw5_2)
else:
masks[:, :, 0] = img_as_ubyte(bw3)
components[frame] = num_fg
# masks = masks.astype(np.uint8)
skimage.io.imsave(os.path.join(fg_dir, str(frame) + '.png'), masks)
components_df = pd.DataFrame({'components': components})
components_df.to_csv(os.path.join(os.path.dirname(
frames_dir), 'components.csv'), index=False)
return components
def split_train_val(dataset_dir, frac_split_train):
"""Split a dataset into subsets train and val inside dataset directory
Args:
dataset_dir: path to the dataset containing images and their annotation json files
frac_split_train: fraction of train subset in the dataset
Returns:
"""
json_ids = [f for f in os.listdir(dataset_dir) if f.endswith('.json')]
random.shuffle(json_ids)
train_dir = os.path.join(dataset_dir, 'train')
os.mkdir(train_dir)
val_dir = os.path.join(dataset_dir, 'val')
os.mkdir(val_dir)
for json_id in json_ids[: int(frac_split_train * len(json_ids))]:
copyfile(os.path.join(dataset_dir, json_id),
os.path.join(train_dir, json_id))
os.remove(os.path.join(dataset_dir, json_id))
copyfile(os.path.join(dataset_dir, os.path.splitext(json_id)[0] + '.jpg'),
os.path.join(train_dir, os.path.splitext(json_id)[0] + '.jpg'))
os.remove(os.path.join(
dataset_dir, os.path.splitext(json_id)[0] + '.jpg'))
for json_id in json_ids[int(frac_split_train * len(json_ids)):]:
copyfile(os.path.join(dataset_dir, json_id),
os.path.join(val_dir, json_id))
os.remove(os.path.join(dataset_dir, json_id))
copyfile(os.path.join(dataset_dir, os.path.splitext(json_id)[0] + '.jpg'),
os.path.join(val_dir, os.path.splitext(json_id)[0] + '.jpg'))
os.remove(os.path.join(
dataset_dir, os.path.splitext(json_id)[0] + '.jpg'))
def create_dataset(images_dir, components_info, num_annotations):
"""Randomly choose images which have one blob in their foreground
Args:
images_dir: path to images directory
components_info: path to a csv file or an array
num_annotations: the number of images will be picked
Returns:
"""
if isinstance(components_info, str):
components = pd.read_csv(components_info)
components = np.array(components.loc[:, 'components'])
else:
components = components_info
dataset_dir = os.path.join(os.path.dirname(images_dir), 'dataset')
os.mkdir(dataset_dir)
touching = [i for i in range(len(components)) if components[i] == 1]
if (components == 1).sum() > num_annotations:
random.shuffle(touching)
for image_id in touching[:num_annotations]:
copyfile(os.path.join(images_dir, str(image_id) + '.jpg'),
os.path.join(dataset_dir, str(image_id) + '.jpg'))
else:
for image_id in touching:
copyfile(os.path.join(images_dir, str(image_id) + '.jpg'),
os.path.join(dataset_dir, str(image_id) + '.jpg'))
def correct_segmentation_errors(components_info, fix_dir, frames_dir):
"""Count and pick one failed frame in every 3 consecutive fail frames for correcting
Args:
components_info: path to a csv file or an array
fix_dir: path to directory for saving frames chosen
frames_dir: path to directory containing frames
Returns:
correct_frames: the number of frames picked up
"""
if isinstance(components_info, str):
components = pd.read_csv(components_info)
components = np.array(components.loc[:, 'components'])
else:
components = components_info
errors = np.array(components != 2, dtype=int)
errors_accumulate = np.zeros(len(errors))
interval_start = 0
for i in range(len(errors)):
if (errors[i] == 1) & (interval_start == 0):
interval_start = 1
elif errors[i] == 0:
interval_start = 0
if (interval_start == 1) & (i > 0):
errors_accumulate[i] = errors_accumulate[i - 1] + 1
# plt.plot(errors_accumulate)
correct_frames = 0
if components[0] != 2:
copyfile(os.path.join(frames_dir, '0.jpg'),
os.path.join(fix_dir, '0.jpg'))
correct_frames = correct_frames + 1
for i in range(len(errors_accumulate)):
if (errors_accumulate[i] > 0) & (errors_accumulate[i] % 3 == 0):
copyfile(os.path.join(frames_dir, str(i) + '.jpg'),
os.path.join(fix_dir, str(i) + '.jpg'))
correct_frames = correct_frames + 1
return correct_frames
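# Hedged end-to-end sketch of how the helpers above chain together (the paths
# below are hypothetical; images in the 'dataset' folder are annotated by hand,
# e.g. with a tool such as labelme, before split_train_val is called):
def _demo_preprocessing(video_path='videos/session1.avi', background_path='videos/background.jpg'):
    frames_dir = video2frames(video_path)
    components = background_subtraction(frames_dir, background_path)
    create_dataset(frames_dir, components, num_annotations=100)
    # ... annotate the images in <video folder>/dataset, then:
    # split_train_val(os.path.join(os.path.dirname(frames_dir), 'dataset'), frac_split_train=0.8)
    fix_dir = os.path.join(os.path.dirname(frames_dir), 'fix')
    os.makedirs(fix_dir, exist_ok=True)
    return correct_segmentation_errors(components, fix_dir, frames_dir)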
def tracking_inference(fg_dir, components_info):
"""Track the identities of mice
Args:
fg_dir: path to directory containing foreground
components_info: path to a csv file or an array
"""
tracking_dir = os.path.join(os.path.dirname(fg_dir), 'tracking')
if not os.path.exists(tracking_dir):
os.mkdir(tracking_dir)
if isinstance(components_info, str):
        components = pd.read_csv(components_info)
"""Pytest fixtures."""
import pytest
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from hcrystalball.wrappers import ProphetWrapper
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import TBATSWrapper
from hcrystalball.wrappers import SarimaxWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
from hcrystalball.ensemble import StackingEnsemble, SimpleEnsemble
import pandas._testing as tm
random_state = np.random.RandomState(123)
tm.N = 100 # 100 rows
tm.K = 1 # 1 column
@pytest.fixture(scope="module")
def wrapper_instance(request):
if request.param == "prophet":
return ProphetWrapper(daily_seasonality=False, weekly_seasonality=False, yearly_seasonality=False)
elif request.param == "smoothing":
return ExponentialSmoothingWrapper(trend="add")
elif request.param == "tbats":
return TBATSWrapper(use_arma_errors=False, use_box_cox=False)
elif request.param == "sklearn":
return get_sklearn_wrapper(LinearRegression, lags=4)
elif request.param == "sarimax":
return SarimaxWrapper(order=(1, 1, 0), seasonal_order=(1, 1, 1, 2))
elif request.param == "stacking_ensemble":
return StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
],
meta_model=LinearRegression(),
horizons_as_features=False,
weekdays_as_features=False,
)
elif request.param == "simple_ensemble":
return SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
]
)
@pytest.fixture(scope="module")
def wrapper_instance_capped(request):
if request.param.split(";")[0] == "prophet":
return ProphetWrapper(
daily_seasonality=False,
weekly_seasonality=False,
yearly_seasonality=False,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "smoothing":
return ExponentialSmoothingWrapper(
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "tbats":
return TBATSWrapper(
use_arma_errors=False,
use_box_cox=False,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "sklearn":
return get_sklearn_wrapper(
LinearRegression,
lags=4,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "sarimax":
return SarimaxWrapper(
order=(1, 1, 0),
seasonal_order=(1, 1, 1, 2),
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "stacking_ensemble":
return StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(
name="smoot_exp1",
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
ExponentialSmoothingWrapper(
name="smoot_exp2",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
],
meta_model=LinearRegression(),
horizons_as_features=False,
weekdays_as_features=False,
train_n_splits=1,
train_horizon=10,
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
)
elif request.param.split(";")[0] == "simple_ensemble":
return SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(
name="smoot_exp1",
trend="add",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
ExponentialSmoothingWrapper(
name="smoot_exp2",
clip_predictions_lower=float(request.param.split(";")[1]),
clip_predictions_upper=float(request.param.split(";")[2]),
),
]
)
@pytest.fixture(scope="module")
def X_y_linear_trend(request):
if request.param[-1] not in ("D", "W", "M", "Q", "Y"):
raise ValueError("Invalid `X_y_with_freq` fixture param.")
X = pd.DataFrame(
pd.date_range(start="2019-01-01", periods=100, freq=request.param.split("freq_")[1][0]),
columns=["date"],
)
if "negative" in request.param:
y = pd.Series(np.linspace(start=80, stop=-19, num=100))
else:
y = pd.Series(np.linspace(start=1, stop=100, num=100))
if "more_cols" in request.param:
X["trend"] = y + 10
X["one_hot"] = np.repeat([1, 2, 3, 4], len(X) / 4)
if "country_col" in request.param:
X["country"] = "DE"
if "ndarray" in request.param:
y = y.values
if "NaN_y" in request.param:
y[::9] = np.nan
if "Inf_y" in request.param:
y[::15] = np.inf
y[::16] = -np.inf
return X.set_index("date"), y
@pytest.fixture(scope="module")
def X_y_optional(request):
X = pd.DataFrame(index=pd.date_range(start="2019-01-01", periods=300))
if request.param == "just_X":
y = None
else:
y = np.arange(X.shape[0])
return X, y
@pytest.fixture(scope="module")
def X_with_holidays():
from hcrystalball.feature_extraction import HolidayTransformer
X = pd.DataFrame(index=pd.date_range(start="2019-01-01", periods=300))
holidays = HolidayTransformer(country_code="DE").fit_transform(X)
return X.join(holidays)
@pytest.fixture(
scope="module",
params=[
"series",
"series_with_NaN",
"series_with_Inf",
"series_with_name",
"series_with_index_name",
"dataframe",
"dataframe_with_NaN",
"dataframe_with_Inf",
"dataframe_with_name",
"dataframe_with_index_name",
"dataframe_multicolumn",
"dataframe_integer_index",
"random_string",
"emtpy_series",
"empty_dataframe",
],
)
def ts_data(request):
if "series" in request.param:
if "empty" in request.param:
result = pd.Series()
else:
result = tm.makeTimeSeries(freq="M")
elif "dataframe" in request.param:
if "empty" in request.param:
result = pd.DataFrame()
else:
result = tm.makeTimeDataFrame(freq="M")
if "multicolumn" in request.param:
result["dummy_column"] = random_state.random_sample(result.shape[0])
elif "string" in request.param:
result = "random_dummy_string"
else:
result = None
if isinstance(result, pd.Series) | isinstance(result, pd.DataFrame):
if "with_NaN" in request.param:
result[::2] = np.nan
if "with_Inf" in request.param:
result[::3] = np.inf
result[::6] = -np.inf
if "with_name" in request.param:
result.name = "time_series"
if "with_index_name" in request.param:
result.index.name = "time_series_index"
if "integer_index" in request.param:
result.index = np.arange(result.shape[0], dtype=int)
return result
@pytest.fixture(scope="module")
def X_y_with_freq(request):
if request.param[-1] not in ("D", "W", "M", "Q", "Y"):
raise ValueError("Invalid `X_y_with_freq` fixture param.")
series = tm.makeTimeSeries(freq=request.param.split("freq_")[1][0])
X = pd.DataFrame(index=series.index)
if "series" in request.param:
y = series
elif "ndarray" in request.param:
y = series.values
else:
raise ValueError("Invalid `X_y_with_freq` fixture param.")
if "NaN_y" in request.param:
y[::9] = np.nan
if "Inf_y" in request.param:
y[::10] = np.inf
y[::11] = -np.inf
return X, y
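# A hedged illustration (not part of the original suite) of how the indirectly
# parametrized fixtures above are typically consumed from a test module:
#
#   @pytest.mark.parametrize("wrapper_instance", ["smoothing", "sklearn"], indirect=True)
#   @pytest.mark.parametrize("X_y_with_freq", ["series_freq_D"], indirect=True)
#   def test_fit_predict_runs(wrapper_instance, X_y_with_freq):
#       X, y = X_y_with_freq
#       wrapper_instance.fit(X[:-10], y[:-10])
#       assert wrapper_instance.predict(X[-10:]) is not None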
@pytest.fixture(scope="module")
def pipeline_instance_model_only(request):
if request.param == "prophet":
return Pipeline(
[
(
"regressor",
ProphetWrapper(
daily_seasonality=False, weekly_seasonality=False, yearly_seasonality=False,
),
)
]
)
elif request.param == "smoothing":
return Pipeline([("regressor", ExponentialSmoothingWrapper(trend="add"))])
elif request.param == "tbats":
return Pipeline([("regressor", TBATSWrapper(use_arma_errors=False, use_box_cox=False))])
elif request.param == "sklearn":
return Pipeline([("regressor", get_sklearn_wrapper(LinearRegression, lags=4))])
elif request.param == "sarimax":
return Pipeline([("regressor", SarimaxWrapper(order=(1, 1, 0), seasonal_order=(1, 1, 1, 1)),)])
elif request.param == "stacking_ensemble":
return Pipeline(
[
(
"regressor",
StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
],
meta_model=LinearRegression(),
),
)
]
)
elif request.param == "simple_ensemble":
return Pipeline(
[
(
"regressor",
SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
]
),
)
]
)
else:
return None
@pytest.fixture(scope="module")
def pipeline_instance_model_in_pipeline(request):
if request.param == "prophet":
return Pipeline(
[
(
"model",
Pipeline(
[
(
"regressor",
ProphetWrapper(
daily_seasonality=False,
weekly_seasonality=False,
yearly_seasonality=False,
),
)
]
),
)
]
)
elif request.param == "smoothing":
return Pipeline([("model", Pipeline([("regressor", ExponentialSmoothingWrapper(trend="add"))]),)])
elif request.param == "tbats":
return Pipeline(
[("model", Pipeline([("regressor", TBATSWrapper(use_arma_errors=False, use_box_cox=False),)]),)]
)
elif request.param == "sklearn":
return Pipeline(
[("model", Pipeline([("regressor", get_sklearn_wrapper(LinearRegression, lags=4))]),)]
)
elif request.param == "sarimax":
return Pipeline(
[
(
"model",
Pipeline([("regressor", SarimaxWrapper(order=(1, 1, 0), seasonal_order=(1, 1, 1, 1)),)]),
)
]
)
elif request.param == "stacking_ensemble":
return Pipeline(
[
(
"model",
Pipeline(
[
(
"regressor",
StackingEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
],
meta_model=LinearRegression(),
),
)
]
),
)
]
)
elif request.param == "simple_ensemble":
return Pipeline(
[
(
"model",
Pipeline(
[
(
"regressor",
SimpleEnsemble(
base_learners=[
ExponentialSmoothingWrapper(name="smoot_exp1", trend="add"),
ExponentialSmoothingWrapper(name="smoot_exp2"),
]
),
)
]
),
)
]
)
else:
return None
@pytest.fixture()
def test_data_raw():
n_dates = 10
n_region = 2
n_plant = 3
n_product = 4
dates = ["2018-01-" + str(i) for i in range(1, n_dates + 1)]
regions = ["region_" + str(i) for i in range(n_region)]
plants = ["plant_" + str(i) for i in range(n_plant)]
products = ["product_" + str(i) for i in range(n_product)]
dfs = []
for region in regions:
df_tmp = pd.DataFrame(
columns=["date", "Region", "Plant", "Product", "Quantity"], index=range(len(dates)),
)
df_tmp.loc[:, "Region"] = region
for plant in plants:
df_tmp.loc[:, "Plant"] = plant
for product in products:
df_tmp.loc[:, "date"] = dates
df_tmp.loc[:, "Product"] = product
df_tmp.loc[:, "Quantity"] = random_state.random_sample(n_dates)
dfs.append(df_tmp.copy())
return pd.concat(dfs).assign(date=lambda x: pd.to_datetime(x["date"])).set_index("date")
@pytest.fixture
def train_data(request):
n_dates = 200
n_product = 3
n_regions = 2
tm.N = n_dates
tm.K = 1
df0 = tm.makeTimeDataFrame(freq="D")
products = ["product_" + str(i) for i in range(n_product)]
regions = ["region_" + str(i) for i in range(n_regions)]
dfs = []
df_tmp = pd.DataFrame(
columns=["date", "Region", "Product", "Holidays_code", "Quantity"], index=range(len(df0.index)),
)
for region in regions:
df_tmp.loc[:, "Region"] = region
for product in products:
df_tmp.loc[:, "date"] = df0.index.astype(str).to_list()
df_tmp.loc[:, "Product"] = product
df_tmp.loc[:, "Quantity"] = random_state.random_sample(n_dates)
df_tmp.loc[:, "Holidays_code"] = "NL"
dfs.append(df_tmp.copy())
    df = pd.concat(dfs).assign(date=lambda x: pd.to_datetime(x["date"]))
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
        base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
"""Module for calculating shooting hot spots by street block."""
from dataclasses import dataclass
import geopandas as gpd
import numpy as np
import pandas as pd
from cached_property import cached_property
from loguru import logger
from shapely import ops
from shapely.geometry import MultiLineString
from . import DATA_DIR, EPSG
def _as_string(x):
return f"{x:.0f}" if x else ""
def get_largest_contiguous_line(x):
multi = ops.linemerge(MultiLineString(x.tolist()))
if isinstance(multi, MultiLineString):
lengths = [line.length for line in multi]
idx = np.argmax(lengths)
return multi[idx]
else:
return multi
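# Small synthetic example (coordinates are made up) of the behaviour above: two
# touching segments merge into one contiguous line, and the short disjoint
# segment is discarded because the merged piece is the longest.
def _demo_largest_contiguous_line():
    from shapely.geometry import LineString
    segments = gpd.GeoSeries(
        [
            LineString([(0, 0), (1, 0)]),
            LineString([(1, 0), (2, 0)]),
            LineString([(5, 5), (5.5, 5)]),
        ]
    )
    return get_largest_contiguous_line(segments)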
def _match_to_streets(data, streets, key, buffer):
"""
Associate the input Point data set with the nearest street.
Parameters
----------
data : GeoDataFrame
dataframe holding the Point data set, in this case, either
the work orders or requests for street defects
key : str
the unique identifier for the input data
buffer : int
the buffer in feet to search for matches
"""
# get only the unique values
unique_data = data.drop_duplicates(subset=key)
# save the original un-buffered streets data
streets_geometry = streets.geometry.copy()
# buffer the geometry and do the spatial join
streets.geometry = streets.geometry.buffer(buffer)
df = gpd.sjoin(unique_data, streets, predicate="within", how="left")
# missing vs matched
missing = df.loc[df["street_name"].isnull()].copy()
matched = df.loc[~df["street_name"].isnull()].copy()
# remove any missing that are in matched
missing = missing.loc[missing.index.difference(matched.index)]
missing = missing.loc[~missing.duplicated(subset=key)]
def get_closest(x):
match = streets.loc[streets_geometry.distance(x.geometry).idxmin()]
match = match.drop("geometry")
x.update(match)
return x
# add a distance column
matched_streets = streets_geometry.loc[matched["index_right"]]
D = matched.reset_index(drop=True).distance(matched_streets.reset_index(drop=True))
matched["distance"] = D.values
# drop duplicates, keeping the first
matched = matched.sort_values(by="distance", ascending=True)
matched = matched.loc[~matched.index.duplicated(keep="first")]
matched = matched.drop(labels=["distance"], axis=1)
# get matches for missing
Y = missing.apply(get_closest, axis=1)
# join missing and matched
out = pd.concat([matched, Y], axis=0)
# merge back in to original data frame
columns = list(set(streets.columns) - {"geometry"})
    out = pd.merge(data, out[columns + [key]], on=key, how="left")
    return out
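# Hedged usage sketch: the key name and buffer distance below are illustrative
# assumptions, not values from the original module. Because _match_to_streets
# overwrites streets.geometry with the buffered geometry, callers should pass a
# copy if the original layer is still needed afterwards.
def _demo_match_to_streets(shootings, streets):
    return _match_to_streets(shootings.copy(), streets.copy(), key='incident_id', buffer=250)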
from unittest import TestCase
import pandas as pd
from cbcvalidator.main import Validate, ValueOutOfRange, BadConfigurationError
class TestValidate(TestCase):
def test_validate(self):
v = Validate(verbose=True)
data = {'a': [1, 2, 3, 4, 5, 6, 7, 8],
'b': ['abcdefg', 'abcdefghijkl', 'a', 'b', 'c', 'd', 'ef', 'ghi']}
        df = pd.DataFrame(data)
#Test Extract
from DeepTreeAttention import trees
from DeepTreeAttention.visualization import extract
import pandas as pd
import os
import glob
import pytest
#random label predictions just for testing
test_predictions = "data/raw/2019_BART_5_320000_4881000_image_small.shp"
#Use a small rgb crop as a example tile
test_sensor_tile = "data/raw/2019_BART_5_320000_4881000_image_crop.tif"
test_sensor_hyperspec = "data/raw/2019_BART_5_320000_4881000_image_hyperspectral_crop.tif"
@pytest.fixture()
def mod(tmpdir):
mod = trees.AttentionModel(config="conf/tree_config.yml")
train_dir = tmpdir.mkdir("train")
label_file = "{}/label_file.csv".format(train_dir)
#create a fake label file
pd.DataFrame({"taxonID":["Ben","Jon"],"label":[0,1]}).to_csv(label_file)
config = {}
train_config = { }
train_config["tfrecords"] = train_dir
train_config["batch_size"] = 1
train_config["epochs"] = 1
train_config["steps"] = 1
train_config["gpus"] = 1
train_config["crop_size"] = 20
train_config["shuffle"] = True
train_config["weighted_sum"] = True
config["train"] = train_config
#Replace config for testing env
for key, value in config.items():
for nested_key, nested_value in value.items():
mod.config[key][nested_key] = nested_value
#Update the inits
mod.RGB_size = mod.config["train"]["RGB"]["crop_size"]
mod.HSI_size = mod.config["train"]["HSI"]["crop_size"]
mod.HSI_channels = 369
mod.RGB_channels = 3
mod.extend_HSI_box = mod.config["train"]["HSI"]["extend_box"]
mod.classes_file = label_file
    mod.train_shp = pd.DataFrame({"taxonID":["Jon","Ben"], "siteID":[0,1],"domainID":[0,1],"plotID":[0,1], "canopyPosition":["a","b"],"scientific":["genus species","genus species"]})
#%%
#### Processes the raw data json using pandas to get
#### dataframes that can be exported directly to Postgres as normalized tables
import sys
import inspect
import os
import json
import pandas as pd
class DataProcessing:
def __init__(self):
self.product_data_path = self.data_path = '../data/product_data'
self.recipes_data_path = '../data/recipes_data'
self.raw_product_data = DataProcessing._read_data(self.product_data_path)
self.raw_recipes_data = DataProcessing._read_data(self.recipes_data_path)
self.dictionary_of_category_dataframes = {}
self._product_data_to_dataframes() # populate dictionary_of_category_dataframes
self.all_products_df = pd.DataFrame()
self._all_products_df() # populate dataframe _all_products_df
self.all_recipes_df = self.get_df_of_all_recipes()
@staticmethod
def _read_data(path):
'''
Reads data from file.
Args:
path (str): path to the json file to be read
Returns:
Python object containing the data at path
'''
with open(path) as f:
data = f.read()
return json.loads(data)
# populates self.dictionary_of_category_dataframes which is a dictionary of dataframes, one for each category
# drops any duplicates within the category in case there are any
def _product_data_to_dataframes(self):
'''
Takes the data from raw_product_data and transforms it into pandas Dataframes.
The Dataframes are stored in dictionary_of_category_dataframes.
Returns:
dict: dictionary of dataframes containing the data in raw_product_data
'''
for key, value in self.raw_product_data.items():
            df_for_category = pd.DataFrame.from_records(value)
            # Store the per-category frame, dropping duplicates within the category.
            self.dictionary_of_category_dataframes[key] = df_for_category.drop_duplicates()
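    # Hedged sketch (the real method is not shown in this excerpt): the
    # self._all_products_df() call in __init__ plausibly concatenates the
    # per-category frames into one deduplicated table ready for export to Postgres.
    def _all_products_df_sketch(self):
        frames = list(self.dictionary_of_category_dataframes.values())
        if frames:
            self.all_products_df = pd.concat(frames, ignore_index=True).drop_duplicates()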
# -*- coding: utf-8 -*-
# Global imports
import pandas as pd
import fix_yahoo_finance as yf
from update_gsheets import Table
s_name = 'MSFT'
s = yf.Ticker(s_name)
# Dummy classes for Indices and Portfolios. Will fill in later
class Index:
def __repr__(self):
return 'Index object {}'.format(self.name)
def __init__(self, name):
self.name = name
self.tick_items = {}
def add_company(self, ticker):
s = yf.Ticker(ticker)
self.tick_items[ticker] = s
def remove_company(self, ticker):
self.tick_items.pop(ticker)
def index_metadata(self):
metadata = {k: v.info for k, v in self.tick_items.items()}
return metadata
def main_metrics_table(self):
columns = ['marketCap',
'forwardPE', 'trailingPE',
'trailingAnnualDividendRate',
'regularMarketDayRange', 'fiftyTwoWeekRange',
'fiftyDayAverage', 'fiftyDayAverageChangePercent',
'twoHundredDayAverage', 'twoHundredDayAverageChangePercent']
metadata = self.index_metadata()
        df = pd.DataFrame(metadata)
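# Hedged usage example: the tickers are illustrative, and a live connection to
# Yahoo Finance is needed for the .info lookups behind index_metadata().
if __name__ == '__main__':
    tech = Index('Tech Giants')
    tech.add_company('MSFT')
    tech.add_company('AAPL')
    meta = tech.index_metadata()
    print(sorted(meta.keys()))  # expected: ['AAPL', 'MSFT']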